1 /* Copyright 2012 STEC, Inc.
3 * This file is licensed under the terms of the 3-clause
4 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
5 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
6 * at your option. Both licenses are also available in the LICENSE file
7 * distributed with this project. This file may not be copied, modified,
8 * or distributed except in accordance with those terms.
9 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
10 * Initial Driver Design!
11 * Thomas Swann <tswann@stec-inc.com>
13 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
14 * biomode implementation.
15 * Akhil Bhansali <abhansali@stec-inc.com>
16 * Added support for DISCARD / FLUSH and FUA.
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/pci.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/blkdev.h>
26 #include <linux/sched.h>
27 #include <linux/interrupt.h>
28 #include <linux/compiler.h>
29 #include <linux/workqueue.h>
30 #include <linux/bitops.h>
31 #include <linux/delay.h>
32 #include <linux/time.h>
33 #include <linux/hdreg.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/completion.h>
36 #include <linux/scatterlist.h>
37 #include <linux/version.h>
38 #include <linux/err.h>
40 #include <linux/aer.h>
41 #include <linux/ctype.h>
42 #include <linux/wait.h>
43 #include <linux/uio.h>
44 #include <scsi/scsi.h>
47 #include <linux/uaccess.h>
48 #include <asm/unaligned.h>
50 #include "skd_s1120.h"
52 static int skd_dbg_level;
53 static int skd_isr_comp_limit = 4;
59 STEC_LINK_UNKNOWN = 0xFF
63 SKD_FLUSH_INITIALIZER,
64 SKD_FLUSH_ZERO_SIZE_FIRST,
65 SKD_FLUSH_DATA_SECOND,
#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)
76 #define DRV_NAME "skd"
77 #define DRV_VERSION "2.2.1"
78 #define DRV_BUILD_ID "0260"
79 #define PFX DRV_NAME ": "
80 #define DRV_BIN_VERSION 0x100
81 #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
83 MODULE_AUTHOR("bug-reports: support@stec-inc.com");
84 MODULE_LICENSE("Dual BSD/GPL");
86 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
87 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
89 #define PCI_VENDOR_ID_STEC 0x1B39
90 #define PCI_DEVICE_ID_S1120 0x0001
92 #define SKD_FUA_NV (1 << 1)
93 #define SKD_MINORS_PER_DEVICE 16
95 #define SKD_MAX_QUEUE_DEPTH 200u
97 #define SKD_PAUSE_TIMEOUT (5 * 1000)
99 #define SKD_N_FITMSG_BYTES (512u)
101 #define SKD_N_SPECIAL_CONTEXT 32u
102 #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
104 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
105 * 128KB limit. That allows 4096*4K = 16M xfer size
107 #define SKD_N_SG_PER_REQ_DEFAULT 256u
108 #define SKD_N_SG_PER_SPECIAL 256u
110 #define SKD_N_COMPLETION_ENTRY 256u
111 #define SKD_N_READ_CAP_BYTES (8u)
113 #define SKD_N_INTERNAL_BYTES (512u)
/* 6 bits of uniquifier, 0xFC00 */
116 #define SKD_ID_INCR (0x400)
117 #define SKD_ID_TABLE_MASK (3u << 8u)
118 #define SKD_ID_RW_REQUEST (0u << 8u)
119 #define SKD_ID_INTERNAL (1u << 8u)
120 #define SKD_ID_SPECIAL_REQUEST (2u << 8u)
121 #define SKD_ID_FIT_MSG (3u << 8u)
122 #define SKD_ID_SLOT_MASK 0x00FFu
123 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
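/*
 * Example of the id layout implied by the masks above: bits 0-7
 * hold the slot, bits 8-9 the table, and the bits above that the
 * uniquifier. An internal-request id with slot 0 and uniquifier 0
 * is 0x0100; after one SKD_ID_INCR bump it becomes 0x0500. Both
 * values mask back to 0x0100 under SKD_ID_SLOT_AND_TABLE_MASK while
 * the full ids differ, which is how stale ids are detected.
 */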
125 #define SKD_N_TIMEOUT_SLOT 4u
126 #define SKD_TIMEOUT_SLOT_MASK 3u
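/*
 * The timeout bookkeeping works like a 4-entry circular histogram:
 * timeout_stamp advances once per 1-second timer tick, and
 * (stamp & SKD_TIMEOUT_SLOT_MASK) cycles 0,1,2,3,0,... A request is
 * counted in the slot current at issue time, so when the timer comes
 * back around to that slot, any nonzero count left there is overdue.
 */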
128 #define SKD_N_MAX_SECTORS 2048u
130 #define SKD_MAX_RETRIES 2u
132 #define SKD_TIMER_SECONDS(seconds) (seconds)
133 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
135 #define INQ_STD_NBYTES 36
137 enum skd_drvr_state {
141 SKD_DRVR_STATE_STARTING,
142 SKD_DRVR_STATE_ONLINE,
143 SKD_DRVR_STATE_PAUSING,
144 SKD_DRVR_STATE_PAUSED,
145 SKD_DRVR_STATE_DRAINING_TIMEOUT,
146 SKD_DRVR_STATE_RESTARTING,
147 SKD_DRVR_STATE_RESUMING,
148 SKD_DRVR_STATE_STOPPING,
149 SKD_DRVR_STATE_FAULT,
150 SKD_DRVR_STATE_DISAPPEARED,
151 SKD_DRVR_STATE_PROTOCOL_MISMATCH,
152 SKD_DRVR_STATE_BUSY_ERASE,
153 SKD_DRVR_STATE_BUSY_SANITIZE,
154 SKD_DRVR_STATE_BUSY_IMMINENT,
155 SKD_DRVR_STATE_WAIT_BOOT,
156 SKD_DRVR_STATE_SYNCING,
159 #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
160 #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
161 #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
162 #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
163 #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
164 #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
165 #define SKD_START_WAIT_SECONDS 90u
171 SKD_REQ_STATE_COMPLETED,
172 SKD_REQ_STATE_TIMEOUT,
173 SKD_REQ_STATE_ABORTED,
176 enum skd_fit_msg_state {
181 enum skd_check_status_action {
182 SKD_CHECK_STATUS_REPORT_GOOD,
183 SKD_CHECK_STATUS_REPORT_SMART_ALERT,
184 SKD_CHECK_STATUS_REQUEUE_REQUEST,
185 SKD_CHECK_STATUS_REPORT_ERROR,
186 SKD_CHECK_STATUS_BUSY_IMMINENT,
189 struct skd_fitmsg_context {
190 enum skd_fit_msg_state state;
192 struct skd_fitmsg_context *next;
201 dma_addr_t mb_dma_address;
204 struct skd_request_context {
205 enum skd_req_state state;
207 struct skd_request_context *next;
217 struct scatterlist *sg;
221 struct fit_sg_descriptor *sksg_list;
222 dma_addr_t sksg_dma_address;
224 struct fit_completion_entry_v1 completion;
226 struct fit_comp_error_info err_info;
229 #define SKD_DATA_DIR_HOST_TO_CARD 1
230 #define SKD_DATA_DIR_CARD_TO_HOST 2
232 struct skd_special_context {
233 struct skd_request_context req;
238 dma_addr_t db_dma_address;
241 dma_addr_t mb_dma_address;
254 struct sg_iovec *iov;
255 struct sg_iovec no_iov_iov;
257 struct skd_special_context *skspcl;
260 typedef enum skd_irq_type {
266 #define SKD_MAX_BARS 2
269 volatile void __iomem *mem_map[SKD_MAX_BARS];
270 resource_size_t mem_phys[SKD_MAX_BARS];
271 u32 mem_size[SKD_MAX_BARS];
273 skd_irq_type_t irq_type;
275 struct skd_msix_entry *msix_entries;
277 struct pci_dev *pdev;
278 int pcie_error_reporting_is_enabled;
281 struct gendisk *disk;
282 struct request_queue *queue;
283 struct device *class_dev;
287 atomic_t device_count;
293 enum skd_drvr_state state;
297 u32 cur_max_queue_depth;
298 u32 queue_low_water_mark;
299 u32 dev_max_queue_depth;
301 u32 num_fitmsg_context;
304 u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
306 struct skd_fitmsg_context *skmsg_free_list;
307 struct skd_fitmsg_context *skmsg_table;
309 struct skd_request_context *skreq_free_list;
310 struct skd_request_context *skreq_table;
312 struct skd_special_context *skspcl_free_list;
313 struct skd_special_context *skspcl_table;
315 struct skd_special_context internal_skspcl;
316 u32 read_cap_blocksize;
317 u32 read_cap_last_lba;
318 int read_cap_is_valid;
319 int inquiry_is_valid;
	u8  inq_serial_num[13]; /* 12 chars plus null term */
321 u8 id_str[80]; /* holds a composite name (pci + sernum) */
325 struct fit_completion_entry_v1 *skcomp_table;
326 struct fit_comp_error_info *skerr_table;
327 dma_addr_t cq_dma_address;
329 wait_queue_head_t waitq;
331 struct timer_list timer;
342 u32 connect_time_stamp;
344 #define SKD_MAX_CONNECT_RETRIES 16
350 struct work_struct completion_worker;
353 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
354 #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
355 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
357 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
361 if (likely(skdev->dbg_level < 2))
362 return readl(skdev->mem_map[1] + offset);
365 val = readl(skdev->mem_map[1] + offset);
367 pr_debug("%s:%s:%d offset %x = %x\n",
368 skdev->name, __func__, __LINE__, offset, val);
374 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
377 if (likely(skdev->dbg_level < 2)) {
378 writel(val, skdev->mem_map[1] + offset);
382 writel(val, skdev->mem_map[1] + offset);
384 pr_debug("%s:%s:%d offset %x = %x\n",
385 skdev->name, __func__, __LINE__, offset, val);
389 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
392 if (likely(skdev->dbg_level < 2)) {
393 writeq(val, skdev->mem_map[1] + offset);
397 writeq(val, skdev->mem_map[1] + offset);
399 pr_debug("%s:%s:%d offset %x = %016llx\n",
400 skdev->name, __func__, __LINE__, offset, val);
405 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
406 static int skd_isr_type = SKD_IRQ_DEFAULT;
408 module_param(skd_isr_type, int, 0444);
409 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
410 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
412 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
413 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
415 module_param(skd_max_req_per_msg, int, 0444);
416 MODULE_PARM_DESC(skd_max_req_per_msg,
417 "Maximum SCSI requests packed in a single message."
418 " (1-14, default==1)");
420 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
421 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
422 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
424 module_param(skd_max_queue_depth, int, 0444);
425 MODULE_PARM_DESC(skd_max_queue_depth,
426 "Maximum SCSI requests issued to s1120."
427 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
429 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
430 module_param(skd_sgs_per_request, int, 0444);
431 MODULE_PARM_DESC(skd_sgs_per_request,
432 "Maximum SG elements per block request."
433 " (1-4096, default==256)");
435 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
436 module_param(skd_max_pass_thru, int, 0444);
437 MODULE_PARM_DESC(skd_max_pass_thru,
438 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
440 module_param(skd_dbg_level, int, 0444);
441 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
443 module_param(skd_isr_comp_limit, int, 0444);
444 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
446 /* Major device number dynamically assigned. */
447 static u32 skd_major;
449 static void skd_destruct(struct skd_device *skdev);
450 static const struct block_device_operations skd_blockdev_ops;
451 static void skd_send_fitmsg(struct skd_device *skdev,
452 struct skd_fitmsg_context *skmsg);
453 static void skd_send_special_fitmsg(struct skd_device *skdev,
454 struct skd_special_context *skspcl);
455 static void skd_request_fn(struct request_queue *rq);
456 static void skd_end_request(struct skd_device *skdev,
457 struct skd_request_context *skreq, int error);
458 static int skd_preop_sg_list(struct skd_device *skdev,
459 struct skd_request_context *skreq);
460 static void skd_postop_sg_list(struct skd_device *skdev,
461 struct skd_request_context *skreq);
463 static void skd_restart_device(struct skd_device *skdev);
464 static int skd_quiesce_dev(struct skd_device *skdev);
465 static int skd_unquiesce_dev(struct skd_device *skdev);
466 static void skd_release_special(struct skd_device *skdev,
467 struct skd_special_context *skspcl);
468 static void skd_disable_interrupts(struct skd_device *skdev);
469 static void skd_isr_fwstate(struct skd_device *skdev);
470 static void skd_recover_requests(struct skd_device *skdev, int requeue);
471 static void skd_soft_reset(struct skd_device *skdev);
473 static const char *skd_name(struct skd_device *skdev);
474 const char *skd_drive_state_to_str(int state);
475 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
476 static void skd_log_skdev(struct skd_device *skdev, const char *event);
477 static void skd_log_skmsg(struct skd_device *skdev,
478 struct skd_fitmsg_context *skmsg, const char *event);
479 static void skd_log_skreq(struct skd_device *skdev,
480 struct skd_request_context *skreq, const char *event);
483 *****************************************************************************
484 * READ/WRITE REQUESTS
485 *****************************************************************************
487 static void skd_fail_all_pending(struct skd_device *skdev)
489 struct request_queue *q = skdev->queue;
493 req = blk_peek_request(q);
496 blk_start_request(req);
497 __blk_end_request_all(req, -EIO);
502 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
503 int data_dir, unsigned lba,
506 if (data_dir == READ)
		scsi_req->cdb[0] = 0x28;	/* READ(10) */
	else
		scsi_req->cdb[0] = 0x2a;	/* WRITE(10) */
511 scsi_req->cdb[1] = 0;
512 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
513 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
514 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
515 scsi_req->cdb[5] = (lba & 0xff);
516 scsi_req->cdb[6] = 0;
517 scsi_req->cdb[7] = (count & 0xff00) >> 8;
518 scsi_req->cdb[8] = count & 0xff;
519 scsi_req->cdb[9] = 0;
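	/*
	 * Worked example: lba = 0x12345678, count = 8, data_dir = READ
	 * yields the READ(10) CDB 28 00 12 34 56 78 00 00 08 00, i.e. a
	 * big-endian LBA in bytes 2..5 and a big-endian transfer length
	 * in bytes 7..8.
	 */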
523 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
524 struct skd_request_context *skreq)
526 skreq->flush_cmd = 1;
	scsi_req->cdb[0] = 0x35;	/* SYNCHRONIZE CACHE(10) */
529 scsi_req->cdb[1] = 0;
530 scsi_req->cdb[2] = 0;
531 scsi_req->cdb[3] = 0;
532 scsi_req->cdb[4] = 0;
533 scsi_req->cdb[5] = 0;
534 scsi_req->cdb[6] = 0;
535 scsi_req->cdb[7] = 0;
536 scsi_req->cdb[8] = 0;
537 scsi_req->cdb[9] = 0;
540 static void skd_request_fn_not_online(struct request_queue *q);
542 static void skd_request_fn(struct request_queue *q)
544 struct skd_device *skdev = q->queuedata;
545 struct skd_fitmsg_context *skmsg = NULL;
546 struct fit_msg_hdr *fmh = NULL;
547 struct skd_request_context *skreq;
548 struct request *req = NULL;
549 struct skd_scsi_request *scsi_req;
550 unsigned long io_flags;
563 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
564 skd_request_fn_not_online(q);
568 if (blk_queue_stopped(skdev->queue)) {
569 if (skdev->skmsg_free_list == NULL ||
570 skdev->skreq_free_list == NULL ||
571 skdev->in_flight >= skdev->queue_low_water_mark)
572 /* There is still some kind of shortage */
575 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
580 * - There are no more native requests
581 * - There are already the maximum number of requests in progress
582 * - There are no more skd_request_context entries
583 * - There are no more FIT msg buffers
589 req = blk_peek_request(q);
591 /* Are there any native requests to start? */
595 lba = (u32)blk_rq_pos(req);
596 count = blk_rq_sectors(req);
597 data_dir = rq_data_dir(req);
598 io_flags = req->cmd_flags;
600 if (req_op(req) == REQ_OP_FLUSH)
603 if (io_flags & REQ_FUA)
606 pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
607 "count=%u(0x%x) dir=%d\n",
608 skdev->name, __func__, __LINE__,
609 req, lba, lba, count, count, data_dir);
611 /* At this point we know there is a request */
		/* Are too many requests already in progress? */
614 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
615 pr_debug("%s:%s:%d qdepth %d, limit %d\n",
616 skdev->name, __func__, __LINE__,
617 skdev->in_flight, skdev->cur_max_queue_depth);
621 /* Is a skd_request_context available? */
622 skreq = skdev->skreq_free_list;
624 pr_debug("%s:%s:%d Out of req=%p\n",
625 skdev->name, __func__, __LINE__, q);
628 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
629 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
631 /* Now we check to see if we can get a fit msg */
633 if (skdev->skmsg_free_list == NULL) {
634 pr_debug("%s:%s:%d Out of msg\n",
635 skdev->name, __func__, __LINE__);
640 skreq->flush_cmd = 0;
642 skreq->sg_byte_count = 0;
645 * OK to now dequeue request from q.
		 * At this point we are committed to either start or reject
648 * the native request. Note that skd_request_context is
649 * available but is still at the head of the free list.
651 blk_start_request(req);
653 skreq->fitmsg_id = 0;
655 /* Either a FIT msg is in progress or we have to start one. */
657 /* Are there any FIT msg buffers available? */
658 skmsg = skdev->skmsg_free_list;
660 pr_debug("%s:%s:%d Out of msg skdev=%p\n",
661 skdev->name, __func__, __LINE__,
665 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
666 SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
668 skdev->skmsg_free_list = skmsg->next;
670 skmsg->state = SKD_MSG_STATE_BUSY;
671 skmsg->id += SKD_ID_INCR;
673 /* Initialize the FIT msg header */
674 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
675 memset(fmh, 0, sizeof(*fmh));
676 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
677 skmsg->length = sizeof(*fmh);
680 skreq->fitmsg_id = skmsg->id;
683 * Note that a FIT msg may have just been started
684 * but contains no SoFIT requests yet.
688 * Transcode the request, checking as we go. The outcome of
689 * the transcoding is represented by the error variable.
691 cmd_ptr = &skmsg->msg_buf[skmsg->length];
692 memset(cmd_ptr, 0, 32);
694 be_lba = cpu_to_be32(lba);
695 be_count = cpu_to_be32(count);
696 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
697 cmdctxt = skreq->id + SKD_ID_INCR;
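		/*
		 * The request id is advanced by SKD_ID_INCR when it goes
		 * busy (further down), so the tag handed to the hardware
		 * here is the id the request will carry while in flight.
		 */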
700 scsi_req->hdr.tag = cmdctxt;
701 scsi_req->hdr.sg_list_dma_address = be_dmaa;
703 if (data_dir == READ)
704 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
706 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
708 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
709 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
710 SKD_ASSERT(skreq->flush_cmd == 1);
713 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
717 scsi_req->cdb[1] |= SKD_FUA_NV;
722 error = skd_preop_sg_list(skdev, skreq);
726 * Complete the native request with error.
727 * Note that the request context is still at the
728 * head of the free list, and that the SoFIT request
729 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only allocated resource that might go unused is
			 * the FIT msg itself, which could remain empty.
734 pr_debug("%s:%s:%d error Out\n",
735 skdev->name, __func__, __LINE__);
736 skd_end_request(skdev, skreq, error);
741 scsi_req->hdr.sg_list_len_bytes =
742 cpu_to_be32(skreq->sg_byte_count);
744 /* Complete resource allocations. */
745 skdev->skreq_free_list = skreq->next;
746 skreq->state = SKD_REQ_STATE_BUSY;
747 skreq->id += SKD_ID_INCR;
749 skmsg->length += sizeof(struct skd_scsi_request);
750 fmh->num_protocol_cmds_coalesced++;
753 * Update the active request counts.
754 * Capture the timeout timestamp.
756 skreq->timeout_stamp = skdev->timeout_stamp;
757 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
758 skdev->timeout_slot[timo_slot]++;
760 pr_debug("%s:%s:%d req=0x%x busy=%d\n",
761 skdev->name, __func__, __LINE__,
762 skreq->id, skdev->in_flight);
765 * If the FIT msg buffer is full send it.
767 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
768 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
769 skd_send_fitmsg(skdev, skmsg);
776 * Is a FIT msg in progress? If it is empty put the buffer back
777 * on the free list. If it is non-empty send what we got.
778 * This minimizes latency when there are fewer requests than
779 * what fits in a FIT msg.
782 /* Bigger than just a FIT msg header? */
783 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
784 pr_debug("%s:%s:%d sending msg=%p, len %d\n",
785 skdev->name, __func__, __LINE__,
786 skmsg, skmsg->length);
787 skd_send_fitmsg(skdev, skmsg);
790 * The FIT msg is empty. It means we got started
791 * on the msg, but the requests were rejected.
793 skmsg->state = SKD_MSG_STATE_IDLE;
794 skmsg->id += SKD_ID_INCR;
795 skmsg->next = skdev->skmsg_free_list;
796 skdev->skmsg_free_list = skmsg;
803 * If req is non-NULL it means there is something to do but
804 * we are out of a resource.
807 blk_stop_queue(skdev->queue);
810 static void skd_end_request(struct skd_device *skdev,
811 struct skd_request_context *skreq, int error)
813 if (unlikely(error)) {
814 struct request *req = skreq->req;
815 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
816 u32 lba = (u32)blk_rq_pos(req);
817 u32 count = blk_rq_sectors(req);
819 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
820 skd_name(skdev), cmd, lba, count, skreq->id);
822 pr_debug("%s:%s:%d id=0x%x error=%d\n",
823 skdev->name, __func__, __LINE__, skreq->id, error);
825 __blk_end_request_all(skreq->req, error);
828 static int skd_preop_sg_list(struct skd_device *skdev,
829 struct skd_request_context *skreq)
831 struct request *req = skreq->req;
832 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
833 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
834 struct scatterlist *sg = &skreq->sg[0];
838 skreq->sg_byte_count = 0;
840 /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
841 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
843 n_sg = blk_rq_map_sg(skdev->queue, req, sg);
848 * Map scatterlist to PCI bus addresses.
849 * Note PCI might change the number of entries.
851 n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
855 SKD_ASSERT(n_sg <= skdev->sgs_per_request);
859 for (i = 0; i < n_sg; i++) {
860 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
861 u32 cnt = sg_dma_len(&sg[i]);
862 uint64_t dma_addr = sg_dma_address(&sg[i]);
864 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
865 sgd->byte_count = cnt;
866 skreq->sg_byte_count += cnt;
867 sgd->host_side_addr = dma_addr;
868 sgd->dev_side_addr = 0;
871 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
872 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
874 if (unlikely(skdev->dbg_level > 1)) {
875 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
876 skdev->name, __func__, __LINE__,
877 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
878 for (i = 0; i < n_sg; i++) {
879 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
880 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
881 "addr=0x%llx next=0x%llx\n",
882 skdev->name, __func__, __LINE__,
883 i, sgd->byte_count, sgd->control,
884 sgd->host_side_addr, sgd->next_desc_ptr);
891 static void skd_postop_sg_list(struct skd_device *skdev,
892 struct skd_request_context *skreq)
894 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
895 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
898 * restore the next ptr for next IO request so we
899 * don't have to set it every time.
901 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
902 skreq->sksg_dma_address +
903 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
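	/*
	 * Example, assuming the 32-byte descriptors noted earlier: for
	 * n_sg = 2 the last descriptor's next pointer is restored to
	 * sksg_dma_address + 64, matching the base + (ix + 1) * sizeof
	 * chain layout, undoing the FIT_SGD_CONTROL_LAST termination.
	 */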
904 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
907 static void skd_request_fn_not_online(struct request_queue *q)
909 struct skd_device *skdev = q->queuedata;
912 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
914 skd_log_skdev(skdev, "req_not_online");
915 switch (skdev->state) {
916 case SKD_DRVR_STATE_PAUSING:
917 case SKD_DRVR_STATE_PAUSED:
918 case SKD_DRVR_STATE_STARTING:
919 case SKD_DRVR_STATE_RESTARTING:
920 case SKD_DRVR_STATE_WAIT_BOOT:
		/* In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported /dev/skd0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do? Delay /dev/skd0?
928 case SKD_DRVR_STATE_BUSY:
929 case SKD_DRVR_STATE_BUSY_IMMINENT:
930 case SKD_DRVR_STATE_BUSY_ERASE:
931 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
934 case SKD_DRVR_STATE_BUSY_SANITIZE:
935 case SKD_DRVR_STATE_STOPPING:
936 case SKD_DRVR_STATE_SYNCING:
937 case SKD_DRVR_STATE_FAULT:
938 case SKD_DRVR_STATE_DISAPPEARED:
	/* If we get here, terminate all pending block requests
	 * with EIO and any scsi pass-thru with appropriate sense
948 skd_fail_all_pending(skdev);
952 *****************************************************************************
954 *****************************************************************************
957 static void skd_timer_tick_not_online(struct skd_device *skdev);
959 static void skd_timer_tick(ulong arg)
961 struct skd_device *skdev = (struct skd_device *)arg;
964 u32 overdue_timestamp;
965 unsigned long reqflags;
968 if (skdev->state == SKD_DRVR_STATE_FAULT)
969 /* The driver has declared fault, and we want it to
970 * stay that way until driver is reloaded.
974 spin_lock_irqsave(&skdev->lock, reqflags);
976 state = SKD_READL(skdev, FIT_STATUS);
977 state &= FIT_SR_DRIVE_STATE_MASK;
978 if (state != skdev->drive_state)
979 skd_isr_fwstate(skdev);
981 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
982 skd_timer_tick_not_online(skdev);
985 skdev->timeout_stamp++;
986 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
989 * All requests that happened during the previous use of
990 * this slot should be done by now. The previous use was
	 * SKD_N_TIMEOUT_SLOT timer ticks (about 4 seconds) ago.
993 if (skdev->timeout_slot[timo_slot] == 0)
996 /* Something is overdue */
997 overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
999 pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
1000 skdev->name, __func__, __LINE__,
1001 skdev->timeout_slot[timo_slot], skdev->in_flight);
1002 pr_err("(%s): Overdue IOs (%d), busy %d\n",
1003 skd_name(skdev), skdev->timeout_slot[timo_slot],
1006 skdev->timer_countdown = SKD_DRAINING_TIMO;
1007 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1008 skdev->timo_slot = timo_slot;
1009 blk_stop_queue(skdev->queue);
1012 mod_timer(&skdev->timer, (jiffies + HZ));
1014 spin_unlock_irqrestore(&skdev->lock, reqflags);
1017 static void skd_timer_tick_not_online(struct skd_device *skdev)
1019 switch (skdev->state) {
1020 case SKD_DRVR_STATE_IDLE:
1021 case SKD_DRVR_STATE_LOAD:
1023 case SKD_DRVR_STATE_BUSY_SANITIZE:
1024 pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
1025 skdev->name, __func__, __LINE__,
1026 skdev->drive_state, skdev->state);
1027 /* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
1030 if (skdev->timer_countdown > 0) {
1031 skdev->timer_countdown--;
1034 skd_recover_requests(skdev, 0);
1037 case SKD_DRVR_STATE_BUSY:
1038 case SKD_DRVR_STATE_BUSY_IMMINENT:
1039 case SKD_DRVR_STATE_BUSY_ERASE:
1040 pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1041 skdev->name, __func__, __LINE__,
1042 skdev->state, skdev->timer_countdown);
1043 if (skdev->timer_countdown > 0) {
1044 skdev->timer_countdown--;
1047 pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1048 skdev->name, __func__, __LINE__,
1049 skdev->state, skdev->timer_countdown);
1050 skd_restart_device(skdev);
1053 case SKD_DRVR_STATE_WAIT_BOOT:
1054 case SKD_DRVR_STATE_STARTING:
1055 if (skdev->timer_countdown > 0) {
1056 skdev->timer_countdown--;
1059 /* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
1061 skdev->state = SKD_DRVR_STATE_FAULT;
1063 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1064 skd_name(skdev), skdev->drive_state);
		/* start the queue so we can respond with error to requests */
1067 /* wakeup anyone waiting for startup complete */
1068 blk_start_queue(skdev->queue);
1069 skdev->gendisk_on = -1;
1070 wake_up_interruptible(&skdev->waitq);
1073 case SKD_DRVR_STATE_ONLINE:
1074 /* shouldn't get here. */
1077 case SKD_DRVR_STATE_PAUSING:
1078 case SKD_DRVR_STATE_PAUSED:
1081 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1082 pr_debug("%s:%s:%d "
1083 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1084 skdev->name, __func__, __LINE__,
1086 skdev->timer_countdown,
1088 skdev->timeout_slot[skdev->timo_slot]);
1089 /* if the slot has cleared we can let the I/O continue */
1090 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1091 pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1092 skdev->name, __func__, __LINE__);
1093 skdev->state = SKD_DRVR_STATE_ONLINE;
1094 blk_start_queue(skdev->queue);
1097 if (skdev->timer_countdown > 0) {
1098 skdev->timer_countdown--;
1101 skd_restart_device(skdev);
1104 case SKD_DRVR_STATE_RESTARTING:
1105 if (skdev->timer_countdown > 0) {
1106 skdev->timer_countdown--;
1109 /* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
1111 skdev->state = SKD_DRVR_STATE_FAULT;
1112 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1113 skd_name(skdev), skdev->drive_state);
1116 * Recovering does two things:
1117 * 1. completes IO with error
1118 * 2. reclaims dma resources
1119 * When is it safe to recover requests?
1120 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
1122 * - if the drive registers are dead (state = FF)
1123 * If it is "unsafe", we still need to recover, so we will
1124 * disable pci bus mastering and disable our interrupts.
1127 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1128 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1129 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1130 /* It never came out of soft reset. Try to
1131 * recover the requests and then let them
1132 * fail. This is to mitigate hung processes. */
1133 skd_recover_requests(skdev, 0);
1135 pr_err("(%s): Disable BusMaster (%x)\n",
1136 skd_name(skdev), skdev->drive_state);
1137 pci_disable_device(skdev->pdev);
1138 skd_disable_interrupts(skdev);
1139 skd_recover_requests(skdev, 0);
		/* start the queue so we can respond with error to requests */
1143 /* wakeup anyone waiting for startup complete */
1144 blk_start_queue(skdev->queue);
1145 skdev->gendisk_on = -1;
1146 wake_up_interruptible(&skdev->waitq);
1149 case SKD_DRVR_STATE_RESUMING:
1150 case SKD_DRVR_STATE_STOPPING:
1151 case SKD_DRVR_STATE_SYNCING:
1152 case SKD_DRVR_STATE_FAULT:
1153 case SKD_DRVR_STATE_DISAPPEARED:
1159 static int skd_start_timer(struct skd_device *skdev)
1163 init_timer(&skdev->timer);
1164 setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1166 rc = mod_timer(&skdev->timer, (jiffies + HZ));
1168 pr_err("%s: failed to start timer %d\n",
1173 static void skd_kill_timer(struct skd_device *skdev)
1175 del_timer_sync(&skdev->timer);
1179 *****************************************************************************
1181 *****************************************************************************
1183 static int skd_ioctl_sg_io(struct skd_device *skdev,
1184 fmode_t mode, void __user *argp);
1185 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1186 struct skd_sg_io *sksgio);
1187 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1188 struct skd_sg_io *sksgio);
1189 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1190 struct skd_sg_io *sksgio);
1191 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1192 struct skd_sg_io *sksgio, int dxfer_dir);
1193 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1194 struct skd_sg_io *sksgio);
1195 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1196 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1197 struct skd_sg_io *sksgio);
1198 static int skd_sg_io_put_status(struct skd_device *skdev,
1199 struct skd_sg_io *sksgio);
1201 static void skd_complete_special(struct skd_device *skdev,
1202 volatile struct fit_completion_entry_v1
1204 volatile struct fit_comp_error_info *skerr,
1205 struct skd_special_context *skspcl);
1207 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1208 uint cmd_in, ulong arg)
1211 struct gendisk *disk = bdev->bd_disk;
1212 struct skd_device *skdev = disk->private_data;
1213 void __user *p = (void *)arg;
1215 pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
1216 skdev->name, __func__, __LINE__,
1217 disk->disk_name, current->comm, mode, cmd_in, arg);
1219 if (!capable(CAP_SYS_ADMIN))
1223 case SG_SET_TIMEOUT:
1224 case SG_GET_TIMEOUT:
1225 case SG_GET_VERSION_NUM:
1226 rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
1229 rc = skd_ioctl_sg_io(skdev, mode, p);
1237 pr_debug("%s:%s:%d %s: completion rc %d\n",
1238 skdev->name, __func__, __LINE__, disk->disk_name, rc);
1242 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1246 struct skd_sg_io sksgio;
1248 memset(&sksgio, 0, sizeof(sksgio));
1251 sksgio.iov = &sksgio.no_iov_iov;
1253 switch (skdev->state) {
1254 case SKD_DRVR_STATE_ONLINE:
1255 case SKD_DRVR_STATE_BUSY_IMMINENT:
1259 pr_debug("%s:%s:%d drive not online\n",
1260 skdev->name, __func__, __LINE__);
1265 rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1269 rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1273 rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1277 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1281 rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1285 rc = skd_sg_io_await(skdev, &sksgio);
1289 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1293 rc = skd_sg_io_put_status(skdev, &sksgio);
1300 skd_sg_io_release_skspcl(skdev, &sksgio);
1302 if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1307 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1308 struct skd_sg_io *sksgio)
1310 struct sg_io_hdr *sgp = &sksgio->sg;
1313 if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1314 pr_debug("%s:%s:%d access sg failed %p\n",
1315 skdev->name, __func__, __LINE__, sksgio->argp);
1319 if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1320 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1321 skdev->name, __func__, __LINE__, sksgio->argp);
1325 if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1326 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1327 skdev->name, __func__, __LINE__, sgp->interface_id);
1331 if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1332 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1333 skdev->name, __func__, __LINE__, sgp->cmd_len);
1337 if (sgp->iovec_count > 256) {
1338 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1339 skdev->name, __func__, __LINE__, sgp->iovec_count);
1343 if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1344 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1345 skdev->name, __func__, __LINE__, sgp->dxfer_len);
1349 switch (sgp->dxfer_direction) {
1354 case SG_DXFER_TO_DEV:
1358 case SG_DXFER_FROM_DEV:
1359 case SG_DXFER_TO_FROM_DEV:
1364 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1365 skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1369 if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1370 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1371 skdev->name, __func__, __LINE__, sgp->cmdp);
1375 if (sgp->mx_sb_len != 0) {
1376 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1377 pr_debug("%s:%s:%d access sbp failed %p\n",
1378 skdev->name, __func__, __LINE__, sgp->sbp);
1383 if (sgp->iovec_count == 0) {
1384 sksgio->iov[0].iov_base = sgp->dxferp;
1385 sksgio->iov[0].iov_len = sgp->dxfer_len;
1387 sksgio->dxfer_len = sgp->dxfer_len;
1389 struct sg_iovec *iov;
1390 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1391 size_t iov_data_len;
1393 iov = kmalloc(nbytes, GFP_KERNEL);
1395 pr_debug("%s:%s:%d alloc iovec failed %d\n",
1396 skdev->name, __func__, __LINE__,
1401 sksgio->iovcnt = sgp->iovec_count;
1403 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1404 pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1405 skdev->name, __func__, __LINE__, sgp->dxferp);
1410 * Sum up the vecs, making sure they don't overflow
1413 for (i = 0; i < sgp->iovec_count; i++) {
1414 if (iov_data_len + iov[i].iov_len < iov_data_len)
1416 iov_data_len += iov[i].iov_len;
1419 /* SG_IO howto says that the shorter of the two wins */
1420 if (sgp->dxfer_len < iov_data_len) {
1421 sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1424 sksgio->dxfer_len = sgp->dxfer_len;
1426 sksgio->dxfer_len = iov_data_len;
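		/*
		 * Example: with dxfer_len = 4096 and iovecs summing to
		 * 8192 bytes, iov_shorten() trims the vector to cover only
		 * the first 4096 bytes; in the opposite case the transfer
		 * is simply capped at the iovec total.
		 */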
1429 if (sgp->dxfer_direction != SG_DXFER_NONE) {
1430 struct sg_iovec *iov = sksgio->iov;
1431 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1432 if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1433 pr_debug("%s:%s:%d access data failed %p/%d\n",
1434 skdev->name, __func__, __LINE__,
1435 iov->iov_base, (int)iov->iov_len);
1444 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1445 struct skd_sg_io *sksgio)
1447 struct skd_special_context *skspcl = NULL;
1453 spin_lock_irqsave(&skdev->lock, flags);
1454 skspcl = skdev->skspcl_free_list;
1455 if (skspcl != NULL) {
1456 skdev->skspcl_free_list =
1457 (struct skd_special_context *)skspcl->req.next;
1458 skspcl->req.id += SKD_ID_INCR;
1459 skspcl->req.state = SKD_REQ_STATE_SETUP;
1460 skspcl->orphaned = 0;
1461 skspcl->req.n_sg = 0;
1463 spin_unlock_irqrestore(&skdev->lock, flags);
1465 if (skspcl != NULL) {
1470 pr_debug("%s:%s:%d blocking\n",
1471 skdev->name, __func__, __LINE__);
1473 rc = wait_event_interruptible_timeout(
1475 (skdev->skspcl_free_list != NULL),
1476 msecs_to_jiffies(sksgio->sg.timeout));
1478 pr_debug("%s:%s:%d unblocking, rc=%d\n",
1479 skdev->name, __func__, __LINE__, rc);
	 * If we get here, rc > 0, meaning wait_event_interruptible_timeout()
	 * returned with time left; the sought event (a non-empty free
	 * list) happened. Retry the allocation.
1495 sksgio->skspcl = skspcl;
1500 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1501 struct skd_request_context *skreq,
1504 u32 resid = dxfer_len;
1507 * The DMA engine must have aligned addresses and byte counts.
1509 resid += (-resid) & 3;
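	/*
	 * (-resid) & 3 rounds resid up to the next multiple of 4:
	 * resid = 5 becomes 5 + 3 = 8, while resid = 8 is unchanged.
	 */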
1510 skreq->sg_byte_count = resid;
1515 u32 nbytes = PAGE_SIZE;
1516 u32 ix = skreq->n_sg;
1517 struct scatterlist *sg = &skreq->sg[ix];
1518 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1524 page = alloc_page(GFP_KERNEL);
1528 sg_set_page(sg, page, nbytes, 0);
1530 /* TODO: This should be going through a pci_???()
1531 * routine to do proper mapping. */
1532 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1533 sksg->byte_count = nbytes;
1535 sksg->host_side_addr = sg_phys(sg);
1537 sksg->dev_side_addr = 0;
1538 sksg->next_desc_ptr = skreq->sksg_dma_address +
1539 (ix + 1) * sizeof(*sksg);
1545 if (skreq->n_sg > 0) {
1546 u32 ix = skreq->n_sg - 1;
1547 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1549 sksg->control = FIT_SGD_CONTROL_LAST;
1550 sksg->next_desc_ptr = 0;
1553 if (unlikely(skdev->dbg_level > 1)) {
1556 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1557 skdev->name, __func__, __LINE__,
1558 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1559 for (i = 0; i < skreq->n_sg; i++) {
1560 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1562 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
1563 "addr=0x%llx next=0x%llx\n",
1564 skdev->name, __func__, __LINE__,
1565 i, sgd->byte_count, sgd->control,
1566 sgd->host_side_addr, sgd->next_desc_ptr);
1573 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1574 struct skd_sg_io *sksgio)
1576 struct skd_special_context *skspcl = sksgio->skspcl;
1577 struct skd_request_context *skreq = &skspcl->req;
1578 u32 dxfer_len = sksgio->dxfer_len;
1581 rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1583 * Eventually, errors or not, skd_release_special() is called
1584 * to recover allocations including partial allocations.
1589 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1590 struct skd_sg_io *sksgio, int dxfer_dir)
1592 struct skd_special_context *skspcl = sksgio->skspcl;
1594 struct sg_iovec curiov;
1598 u32 resid = sksgio->dxfer_len;
1602 curiov.iov_base = NULL;
1604 if (dxfer_dir != sksgio->sg.dxfer_direction) {
1605 if (dxfer_dir != SG_DXFER_TO_DEV ||
1606 sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1611 u32 nbytes = PAGE_SIZE;
1613 if (curiov.iov_len == 0) {
1614 curiov = sksgio->iov[iov_ix++];
1620 page = sg_page(&skspcl->req.sg[sksg_ix++]);
1621 bufp = page_address(page);
1622 buf_len = PAGE_SIZE;
1625 nbytes = min_t(u32, nbytes, resid);
1626 nbytes = min_t(u32, nbytes, curiov.iov_len);
1627 nbytes = min_t(u32, nbytes, buf_len);
1629 if (dxfer_dir == SG_DXFER_TO_DEV)
1630 rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1632 rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1638 curiov.iov_len -= nbytes;
1639 curiov.iov_base += nbytes;
1646 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1647 struct skd_sg_io *sksgio)
1649 struct skd_special_context *skspcl = sksgio->skspcl;
1650 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1651 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1653 memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1655 /* Initialize the FIT msg header */
1656 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1657 fmh->num_protocol_cmds_coalesced = 1;
1659 /* Initialize the SCSI request */
1660 if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1661 scsi_req->hdr.sg_list_dma_address =
1662 cpu_to_be64(skspcl->req.sksg_dma_address);
1663 scsi_req->hdr.tag = skspcl->req.id;
1664 scsi_req->hdr.sg_list_len_bytes =
1665 cpu_to_be32(skspcl->req.sg_byte_count);
1666 memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1668 skspcl->req.state = SKD_REQ_STATE_BUSY;
1669 skd_send_special_fitmsg(skdev, skspcl);
1674 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1676 unsigned long flags;
1679 rc = wait_event_interruptible_timeout(skdev->waitq,
1680 (sksgio->skspcl->req.state !=
1681 SKD_REQ_STATE_BUSY),
1682 msecs_to_jiffies(sksgio->sg.
1685 spin_lock_irqsave(&skdev->lock, flags);
1687 if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1688 pr_debug("%s:%s:%d skspcl %p aborted\n",
1689 skdev->name, __func__, __LINE__, sksgio->skspcl);
1691 /* Build check cond, sense and let command finish. */
1692 /* For a timeout, we must fabricate completion and sense
1693 * data to complete the command */
1694 sksgio->skspcl->req.completion.status =
1695 SAM_STAT_CHECK_CONDITION;
1697 memset(&sksgio->skspcl->req.err_info, 0,
1698 sizeof(sksgio->skspcl->req.err_info));
1699 sksgio->skspcl->req.err_info.type = 0x70;
1700 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1701 sksgio->skspcl->req.err_info.code = 0x44;
1702 sksgio->skspcl->req.err_info.qual = 0;
1704 } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1705 /* No longer on the adapter. We finish. */
		/* Something's gone wrong. Still busy. Timeout or
		 * user interrupted (control-C). Mark as an orphan
		 * so it will be disposed of when completed. */
1711 sksgio->skspcl->orphaned = 1;
1712 sksgio->skspcl = NULL;
1714 pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1715 skdev->name, __func__, __LINE__,
1716 sksgio, sksgio->sg.timeout);
1719 pr_debug("%s:%s:%d cntlc %p\n",
1720 skdev->name, __func__, __LINE__, sksgio);
1725 spin_unlock_irqrestore(&skdev->lock, flags);
1730 static int skd_sg_io_put_status(struct skd_device *skdev,
1731 struct skd_sg_io *sksgio)
1733 struct sg_io_hdr *sgp = &sksgio->sg;
1734 struct skd_special_context *skspcl = sksgio->skspcl;
1737 u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1739 sgp->status = skspcl->req.completion.status;
1740 resid = sksgio->dxfer_len - nb;
1742 sgp->masked_status = sgp->status & STATUS_MASK;
1743 sgp->msg_status = 0;
1744 sgp->host_status = 0;
1745 sgp->driver_status = 0;
1747 if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1748 sgp->info |= SG_INFO_CHECK;
1750 pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1751 skdev->name, __func__, __LINE__,
1752 sgp->status, sgp->masked_status, sgp->resid);
1754 if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1755 if (sgp->mx_sb_len > 0) {
1756 struct fit_comp_error_info *ei = &skspcl->req.err_info;
1757 u32 nbytes = sizeof(*ei);
1759 nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1761 sgp->sb_len_wr = nbytes;
1763 if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1764 pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1765 skdev->name, __func__, __LINE__,
1772 if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1773 pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1774 skdev->name, __func__, __LINE__, sksgio->argp);
1781 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1782 struct skd_sg_io *sksgio)
1784 struct skd_special_context *skspcl = sksgio->skspcl;
1786 if (skspcl != NULL) {
1789 sksgio->skspcl = NULL;
1791 spin_lock_irqsave(&skdev->lock, flags);
1792 skd_release_special(skdev, skspcl);
1793 spin_unlock_irqrestore(&skdev->lock, flags);
1800 *****************************************************************************
1801 * INTERNAL REQUESTS -- generated by driver itself
1802 *****************************************************************************
1805 static int skd_format_internal_skspcl(struct skd_device *skdev)
1807 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1808 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1809 struct fit_msg_hdr *fmh;
1810 uint64_t dma_address;
1811 struct skd_scsi_request *scsi;
1813 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1814 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1815 fmh->num_protocol_cmds_coalesced = 1;
1817 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1818 memset(scsi, 0, sizeof(*scsi));
1819 dma_address = skspcl->req.sksg_dma_address;
1820 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1821 sgd->control = FIT_SGD_CONTROL_LAST;
1822 sgd->byte_count = 0;
1823 sgd->host_side_addr = skspcl->db_dma_address;
1824 sgd->dev_side_addr = 0;
1825 sgd->next_desc_ptr = 0LL;
1830 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1832 static void skd_send_internal_skspcl(struct skd_device *skdev,
1833 struct skd_special_context *skspcl,
1836 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1837 struct skd_scsi_request *scsi;
1838 unsigned char *buf = skspcl->data_buf;
1841 if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1843 * A refresh is already in progress.
1844 * Just wait for it to finish.
1848 SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1849 skspcl->req.state = SKD_REQ_STATE_BUSY;
1850 skspcl->req.id += SKD_ID_INCR;
1852 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1853 scsi->hdr.tag = skspcl->req.id;
1855 memset(scsi->cdb, 0, sizeof(scsi->cdb));
1858 case TEST_UNIT_READY:
1859 scsi->cdb[0] = TEST_UNIT_READY;
1860 sgd->byte_count = 0;
1861 scsi->hdr.sg_list_len_bytes = 0;
1865 scsi->cdb[0] = READ_CAPACITY;
1866 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1867 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1871 scsi->cdb[0] = INQUIRY;
1872 scsi->cdb[1] = 0x01; /* evpd */
1873 scsi->cdb[2] = 0x80; /* serial number page */
1874 scsi->cdb[4] = 0x10;
1875 sgd->byte_count = 16;
1876 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1879 case SYNCHRONIZE_CACHE:
1880 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1881 sgd->byte_count = 0;
1882 scsi->hdr.sg_list_len_bytes = 0;
1886 scsi->cdb[0] = WRITE_BUFFER;
1887 scsi->cdb[1] = 0x02;
1888 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1889 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
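		/*
		 * WR_BUF_SIZE is 512 (SKD_N_INTERNAL_BYTES), so the
		 * big-endian length in cdb[7..8] is 0x02 0x00.
		 */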
1890 sgd->byte_count = WR_BUF_SIZE;
1891 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1892 /* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
1898 scsi->cdb[0] = READ_BUFFER;
1899 scsi->cdb[1] = 0x02;
1900 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1901 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1902 sgd->byte_count = WR_BUF_SIZE;
1903 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1904 memset(skspcl->data_buf, 0, sgd->byte_count);
		SKD_ASSERT(!"Don't know what to send");
1912 skd_send_special_fitmsg(skdev, skspcl);
1915 static void skd_refresh_device_data(struct skd_device *skdev)
1917 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1919 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1922 static int skd_chk_read_buf(struct skd_device *skdev,
1923 struct skd_special_context *skspcl)
1925 unsigned char *buf = skspcl->data_buf;
1928 /* check for incrementing byte pattern */
1929 for (i = 0; i < WR_BUF_SIZE; i++)
1930 if (buf[i] != (i & 0xFF))
1936 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1937 u8 code, u8 qual, u8 fruc)
1939 /* If the check condition is of special interest, log a message */
1940 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1941 && (code == 0x04) && (qual == 0x06)) {
1942 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1943 "ascq/fruc %02x/%02x/%02x/%02x\n",
1944 skd_name(skdev), key, code, qual, fruc);
1948 static void skd_complete_internal(struct skd_device *skdev,
1949 volatile struct fit_completion_entry_v1
1951 volatile struct fit_comp_error_info *skerr,
1952 struct skd_special_context *skspcl)
1954 u8 *buf = skspcl->data_buf;
1957 struct skd_scsi_request *scsi =
1958 (struct skd_scsi_request *)&skspcl->msg_buf[64];
1960 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1962 pr_debug("%s:%s:%d complete internal %x\n",
1963 skdev->name, __func__, __LINE__, scsi->cdb[0]);
1965 skspcl->req.completion = *skcomp;
1966 skspcl->req.state = SKD_REQ_STATE_IDLE;
1967 skspcl->req.id += SKD_ID_INCR;
1969 status = skspcl->req.completion.status;
1971 skd_log_check_status(skdev, status, skerr->key, skerr->code,
1972 skerr->qual, skerr->fruc);
1974 switch (scsi->cdb[0]) {
1975 case TEST_UNIT_READY:
1976 if (status == SAM_STAT_GOOD)
1977 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1978 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1979 (skerr->key == MEDIUM_ERROR))
1980 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1982 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
			pr_debug("%s:%s:%d TUR failed, don't send any more, state 0x%x\n",
1984 skdev->name, __func__, __LINE__,
1988 pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
1989 skdev->name, __func__, __LINE__);
1990 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1995 if (status == SAM_STAT_GOOD)
1996 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1998 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
			pr_debug("%s:%s:%d write buffer failed, don't send any more, state 0x%x\n",
2000 skdev->name, __func__, __LINE__,
2004 pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
2005 skdev->name, __func__, __LINE__);
2006 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2011 if (status == SAM_STAT_GOOD) {
2012 if (skd_chk_read_buf(skdev, skspcl) == 0)
2013 skd_send_internal_skspcl(skdev, skspcl,
2017 "(%s):*** W/R Buffer mismatch %d ***\n",
2018 skd_name(skdev), skdev->connect_retries);
2019 if (skdev->connect_retries <
2020 SKD_MAX_CONNECT_RETRIES) {
2021 skdev->connect_retries++;
2022 skd_soft_reset(skdev);
2025 "(%s): W/R Buffer Connect Error\n",
2032 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2033 pr_debug("%s:%s:%d "
			 "read buffer failed, don't send any more, state 0x%x\n",
2035 skdev->name, __func__, __LINE__,
2039 pr_debug("%s:%s:%d "
2040 "**** read buffer failed, retry skerr\n",
2041 skdev->name, __func__, __LINE__);
2042 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2047 skdev->read_cap_is_valid = 0;
2048 if (status == SAM_STAT_GOOD) {
2049 skdev->read_cap_last_lba =
2050 (buf[0] << 24) | (buf[1] << 16) |
2051 (buf[2] << 8) | buf[3];
2052 skdev->read_cap_blocksize =
2053 (buf[4] << 24) | (buf[5] << 16) |
2054 (buf[6] << 8) | buf[7];
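		/*
		 * buf holds the READ CAPACITY(10) payload: bytes 0..3 are
		 * the big-endian last LBA, bytes 4..7 the block size. E.g.
		 * 00 00 00 FF 00 00 02 00 decodes to last_lba = 255 and a
		 * 512-byte block size.
		 */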
2056 pr_debug("%s:%s:%d last lba %d, bs %d\n",
2057 skdev->name, __func__, __LINE__,
2058 skdev->read_cap_last_lba,
2059 skdev->read_cap_blocksize);
2061 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2063 skdev->read_cap_is_valid = 1;
2065 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2066 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2067 (skerr->key == MEDIUM_ERROR)) {
2068 skdev->read_cap_last_lba = ~0;
2069 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2070 pr_debug("%s:%s:%d "
2071 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2072 skdev->name, __func__, __LINE__);
2073 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2075 pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2076 skdev->name, __func__, __LINE__);
2077 skd_send_internal_skspcl(skdev, skspcl,
2083 skdev->inquiry_is_valid = 0;
2084 if (status == SAM_STAT_GOOD) {
2085 skdev->inquiry_is_valid = 1;
2087 for (i = 0; i < 12; i++)
2088 skdev->inq_serial_num[i] = buf[i + 4];
2089 skdev->inq_serial_num[12] = 0;
2092 if (skd_unquiesce_dev(skdev) < 0)
			pr_debug("%s:%s:%d **** failed to ONLINE device\n",
2094 skdev->name, __func__, __LINE__);
2095 /* connection is complete */
2096 skdev->connect_retries = 0;
2099 case SYNCHRONIZE_CACHE:
2100 if (status == SAM_STAT_GOOD)
2101 skdev->sync_done = 1;
2103 skdev->sync_done = -1;
2104 wake_up_interruptible(&skdev->waitq);
		SKD_ASSERT(!"we didn't send this");
2113 *****************************************************************************
2115 *****************************************************************************
2118 static void skd_send_fitmsg(struct skd_device *skdev,
2119 struct skd_fitmsg_context *skmsg)
2122 struct fit_msg_hdr *fmh;
2124 pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2125 skdev->name, __func__, __LINE__,
2126 skmsg->mb_dma_address, skdev->in_flight);
2127 pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2128 skdev->name, __func__, __LINE__,
2129 skmsg->msg_buf, skmsg->offset);
2131 qcmd = skmsg->mb_dma_address;
2132 qcmd |= FIT_QCMD_QID_NORMAL;
2134 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2135 skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2137 if (unlikely(skdev->dbg_level > 1)) {
2138 u8 *bp = (u8 *)skmsg->msg_buf;
2140 for (i = 0; i < skmsg->length; i += 8) {
2141 pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
2142 "%02x %02x %02x %02x\n",
2143 skdev->name, __func__, __LINE__,
2144 i, bp[i + 0], bp[i + 1], bp[i + 2],
2145 bp[i + 3], bp[i + 4], bp[i + 5],
2146 bp[i + 6], bp[i + 7]);
2152 if (skmsg->length > 256)
2153 qcmd |= FIT_QCMD_MSGSIZE_512;
2154 else if (skmsg->length > 128)
2155 qcmd |= FIT_QCMD_MSGSIZE_256;
2156 else if (skmsg->length > 64)
2157 qcmd |= FIT_QCMD_MSGSIZE_128;
2160 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
2164 qcmd |= FIT_QCMD_MSGSIZE_64;
2166 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2169 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2173 static void skd_send_special_fitmsg(struct skd_device *skdev,
2174 struct skd_special_context *skspcl)
2178 if (unlikely(skdev->dbg_level > 1)) {
2179 u8 *bp = (u8 *)skspcl->msg_buf;
2182 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2183 pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
2184 "%02x %02x %02x %02x\n",
2185 skdev->name, __func__, __LINE__, i,
2186 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
2187 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
2192 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2193 skdev->name, __func__, __LINE__,
2194 skspcl, skspcl->req.id, skspcl->req.sksg_list,
2195 skspcl->req.sksg_dma_address);
2196 for (i = 0; i < skspcl->req.n_sg; i++) {
2197 struct fit_sg_descriptor *sgd =
2198 &skspcl->req.sksg_list[i];
2200 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
2201 "addr=0x%llx next=0x%llx\n",
2202 skdev->name, __func__, __LINE__,
2203 i, sgd->byte_count, sgd->control,
2204 sgd->host_side_addr, sgd->next_desc_ptr);
2209 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2210 * and one 64-byte SSDI command.
2212 qcmd = skspcl->mb_dma_address;
2213 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2215 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2218 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2222 *****************************************************************************
2224 *****************************************************************************
2227 static void skd_complete_other(struct skd_device *skdev,
2228 volatile struct fit_completion_entry_v1 *skcomp,
2229 volatile struct fit_comp_error_info *skerr);
2238 enum skd_check_status_action action;
2241 static struct sns_info skd_chkstat_table[] = {
2243 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
2244 SKD_CHECK_STATUS_REPORT_GOOD },
2247 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
2248 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2249 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
2250 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2251 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
2252 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2254 /* Retry (with limits) */
2255 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
2256 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2257 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
2258 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2259 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
2260 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2261 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
2262 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2264 /* Busy (or about to be) */
2265 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
2266 SKD_CHECK_STATUS_BUSY_IMMINENT },
2270 * Look up status and sense data to decide how to handle the error
 * mask says which fields must match, e.g. mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
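 * As encoded below, the mask bits select: 0x10 = type, 0x08 = stat,
 * 0x04 = key, 0x02 = asc, 0x01 = ascq; so the 0x1C entries match on
 * type, stat and key, and the 0x1F entries must match all five.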
2276 static enum skd_check_status_action
2277 skd_check_status(struct skd_device *skdev,
2278 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2282 pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2283 skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2286 pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2287 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2288 skerr->key, skerr->code, skerr->qual, skerr->fruc);
2290 /* Does the info match an entry in the good category? */
2291 n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2292 for (i = 0; i < n; i++) {
2293 struct sns_info *sns = &skd_chkstat_table[i];
2295 if (sns->mask & 0x10)
2296 if (skerr->type != sns->type)
2297 continue;
2299 if (sns->mask & 0x08)
2300 if (cmp_status != sns->stat)
2301 continue;
2303 if (sns->mask & 0x04)
2304 if (skerr->key != sns->key)
2305 continue;
2307 if (sns->mask & 0x02)
2308 if (skerr->code != sns->asc)
2309 continue;
2311 if (sns->mask & 0x01)
2312 if (skerr->qual != sns->ascq)
2313 continue;
2315 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2316 pr_err("(%s): SMART Alert: sense key/asc/ascq "
2317 "%02x/%02x/%02x\n",
2318 skd_name(skdev), skerr->key,
2319 skerr->code, skerr->qual);
2324 /* No other match, so nonzero status means error,
2325 * zero status means good
2327 if (cmp_status) {
2328 pr_debug("%s:%s:%d status check: error\n",
2329 skdev->name, __func__, __LINE__);
2330 return SKD_CHECK_STATUS_REPORT_ERROR;
2333 pr_debug("%s:%s:%d status check good default\n",
2334 skdev->name, __func__, __LINE__);
2335 return SKD_CHECK_STATUS_REPORT_GOOD;
2338 static void skd_resolve_req_exception(struct skd_device *skdev,
2339 struct skd_request_context *skreq)
2341 u8 cmp_status = skreq->completion.status;
2343 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2344 case SKD_CHECK_STATUS_REPORT_GOOD:
2345 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2346 skd_end_request(skdev, skreq, 0);
2347 break;
2349 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2350 skd_log_skreq(skdev, skreq, "retry(busy)");
2351 blk_requeue_request(skdev->queue, skreq->req);
2352 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2353 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2354 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2355 skd_quiesce_dev(skdev);
2356 break;
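/* Note: the request's ->special field is repurposed as a per-request
 * retry counter, both here and in skd_recover_requests() below. */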
2358 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2359 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2360 skd_log_skreq(skdev, skreq, "retry");
2361 blk_requeue_request(skdev->queue, skreq->req);
2362 break;
2364 /* fall through to report error */
2366 case SKD_CHECK_STATUS_REPORT_ERROR:
2367 default:
2368 skd_end_request(skdev, skreq, -EIO);
2373 /* assume spinlock is already held */
2374 static void skd_release_skreq(struct skd_device *skdev,
2375 struct skd_request_context *skreq)
2378 struct skd_fitmsg_context *skmsg;
2383 * Reclaim the FIT msg buffer if this is
2384 * the first of the requests it carried to
2385 * be completed. The FIT msg buffer used to
2386 * send this request cannot be reused until
2387 * we are sure the s1120 card has copied
2388 * it to its memory. The FIT msg might have
2389 * contained several requests. As soon as
2390 * any of them are completed we know that
2391 * the entire FIT msg was transferred.
2392 * Only the first completed request will
2393 * match the FIT msg buffer id. The FIT
2394 * msg buffer id is immediately updated.
2395 * When subsequent requests complete the FIT
2396 * msg buffer id won't match, so we know
2397 * quite cheaply that it is already done.
2399 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2400 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2402 skmsg = &skdev->skmsg_table[msg_slot];
2403 if (skmsg->id == skreq->fitmsg_id) {
2404 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2405 SKD_ASSERT(skmsg->outstanding > 0);
2406 skmsg->outstanding--;
2407 if (skmsg->outstanding == 0) {
2408 skmsg->state = SKD_MSG_STATE_IDLE;
2409 skmsg->id += SKD_ID_INCR;
2410 skmsg->next = skdev->skmsg_free_list;
2411 skdev->skmsg_free_list = skmsg;
2416 * Decrease the number of active requests.
2417 * Also decrement the count in the timeout slot.
2419 SKD_ASSERT(skdev->in_flight > 0);
2420 skdev->in_flight -= 1;
2422 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2423 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2424 skdev->timeout_slot[timo_slot] -= 1;
2432 * Reclaim the skd_request_context
2434 skreq->state = SKD_REQ_STATE_IDLE;
2435 skreq->id += SKD_ID_INCR;
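/* Bumping the uniquifier bits means a stale completion that still
 * carries the old id will fail the id match in
 * skd_isr_completion_posted() instead of touching a recycled slot. */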
2436 skreq->next = skdev->skreq_free_list;
2437 skdev->skreq_free_list = skreq;
2440 #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
2442 static void skd_do_inq_page_00(struct skd_device *skdev,
2443 volatile struct fit_completion_entry_v1 *skcomp,
2444 volatile struct fit_comp_error_info *skerr,
2445 uint8_t *cdb, uint8_t *buf)
2447 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2449 /* Caller requested "supported pages". The driver needs to insert
2450 * its page, DRIVER_INQ_EVPD_PAGE_CODE, into the list.
2452 pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2453 skdev->name, __func__, __LINE__);
2455 /* If the device rejected the request because the CDB was
2456 * improperly formed, then just leave.
2458 if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2459 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2462 /* Get the amount of space the caller allocated */
2463 max_bytes = (cdb[3] << 8) | cdb[4];
2465 /* Get the number of pages actually returned by the device */
2466 drive_pages = (buf[2] << 8) | buf[3];
2467 drive_bytes = drive_pages + 4;
2468 new_size = drive_pages + 1;
2470 /* Supported pages must be in numerical order, so find where
2471 * the driver page needs to be inserted into the list of
2472 * pages returned by the device.
2474 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2475 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2476 return; /* Device is already using this page code. Abort. */
2477 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2478 break;
2481 if (insert_pt < max_bytes) {
2482 uint16_t u;
2484 /* Shift everything up one byte to make room. */
2485 for (u = new_size + 3; u > insert_pt; u--)
2486 buf[u] = buf[u - 1];
2487 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2489 /* SCSI byte order increment of num_returned_bytes by 1 */
2490 skcomp->num_returned_bytes =
2491 cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
2496 /* update page length field to reflect the driver's page too */
2497 buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2498 buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
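/* Worked example: if the device returned pages 00 80 83, insert_pt
 * ends up past 0x83, 0xDA is appended there, the page length in
 * buf[2..3] becomes 4, and num_returned_bytes grows by one. */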
2501 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2507 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2509 if (pcie_reg) {
2510 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2512 pci_bus_speed = linksta & 0xF;
2513 pci_lanes = (linksta & 0x3F0) >> 4;
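/* PCI_EXP_LNKSTA: bits [3:0] carry the link speed code and bits [9:4]
 * the negotiated lane count, hence the 0xF and 0x3F0 masks. */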
2514 } else {
2515 *speed = STEC_LINK_UNKNOWN;
2516 *width = 0xFF;
2517 return;
2518 }
2520 switch (pci_bus_speed) {
2521 case 1:
2522 *speed = STEC_LINK_2_5GTS;
2523 break;
2524 case 2:
2525 *speed = STEC_LINK_5GTS;
2526 break;
2527 case 3:
2528 *speed = STEC_LINK_8GTS;
2529 break;
2530 default:
2531 *speed = STEC_LINK_UNKNOWN;
2532 break;
2533 }
2535 if (pci_lanes <= 0x20)
2536 *width = pci_lanes;
2537 else
2538 *width = 0xFF;
2541 static void skd_do_inq_page_da(struct skd_device *skdev,
2542 volatile struct fit_completion_entry_v1 *skcomp,
2543 volatile struct fit_comp_error_info *skerr,
2544 uint8_t *cdb, uint8_t *buf)
2546 struct pci_dev *pdev = skdev->pdev;
2548 struct driver_inquiry_data inq;
2549 u16 val;
2551 pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2552 skdev->name, __func__, __LINE__);
2554 memset(&inq, 0, sizeof(inq));
2556 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2558 skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2559 inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2560 inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2561 inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2563 pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2564 inq.pcie_vendor_id = cpu_to_be16(val);
2566 pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2567 inq.pcie_device_id = cpu_to_be16(val);
2569 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2570 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2572 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2573 inq.pcie_subsystem_device_id = cpu_to_be16(val);
2575 /* Driver version, fixed length, padded with spaces on the right */
2576 inq.driver_version_length = sizeof(inq.driver_version);
2577 memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2578 memcpy(inq.driver_version, DRV_VER_COMPL,
2579 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2581 inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2583 /* Clear the error set by the device */
2584 skcomp->status = SAM_STAT_GOOD;
2585 memset((void *)skerr, 0, sizeof(*skerr));
2587 /* copy response into output buffer */
2588 max_bytes = (cdb[3] << 8) | cdb[4];
2589 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2591 skcomp->num_returned_bytes =
2592 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2595 static void skd_do_driver_inq(struct skd_device *skdev,
2596 volatile struct fit_completion_entry_v1 *skcomp,
2597 volatile struct fit_comp_error_info *skerr,
2598 uint8_t *cdb, uint8_t *buf)
2600 if (buf == NULL)
2601 return;
2602 else if (cdb[0] != INQUIRY)
2603 return; /* Not an INQUIRY */
2604 else if ((cdb[1] & 1) == 0)
2605 return; /* EVPD not set */
2606 else if (cdb[2] == 0)
2607 /* Need to add driver's page to supported pages list */
2608 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2609 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2610 /* Caller requested driver's page */
2611 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2614 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2623 static void skd_process_scsi_inq(struct skd_device *skdev,
2624 volatile struct fit_completion_entry_v1
2625 *skcomp,
2626 volatile struct fit_comp_error_info *skerr,
2627 struct skd_special_context *skspcl)
2630 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2631 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2633 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2634 skspcl->req.sg_data_dir);
2635 buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2638 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2642 static int skd_isr_completion_posted(struct skd_device *skdev,
2643 int limit, int *enqueued)
2645 volatile struct fit_completion_entry_v1 *skcmp = NULL;
2646 volatile struct fit_comp_error_info *skerr;
2649 struct skd_request_context *skreq;
2658 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2660 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2661 cmp_cycle = skcmp->cycle;
2662 cmp_cntxt = skcmp->tag;
2663 cmp_status = skcmp->status;
2664 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2666 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2668 pr_debug("%s:%s:%d "
2669 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2670 "busy=%d rbytes=0x%x proto=%d\n",
2671 skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2672 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2673 skdev->in_flight, cmp_bytes, skdev->proto_ver);
2675 if (cmp_cycle != skdev->skcomp_cycle) {
2676 pr_debug("%s:%s:%d end of completions\n",
2677 skdev->name, __func__, __LINE__);
2678 break;
2681 * Update the completion queue head index and possibly
2682 * the completion cycle count. 8-bit wrap-around.
2684 skdev->skcomp_ix++;
2685 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2686 skdev->skcomp_ix = 0;
2687 skdev->skcomp_cycle++;
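/* An entry is consumed only while its cycle field matches the
 * expected cycle; the expected value advances (8-bit wrap) each time
 * the ring wraps, which is how stale entries are told from new ones. */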
2691 * The command context is a unique 32-bit ID. The low order
2692 * bits help locate the request. The request is usually a
2693 * r/w request (see skd_start() above) or a special request.
2695 req_id = cmp_cntxt;
2696 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2698 /* Is this other than a r/w request? */
2699 if (req_slot >= skdev->num_req_context) {
2701 * This is not a completion for a r/w request.
2703 skd_complete_other(skdev, skcmp, skerr);
2704 continue;
2707 skreq = &skdev->skreq_table[req_slot];
2710 * Make sure the request ID for the slot matches.
2712 if (skreq->id != req_id) {
2713 pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2714 skdev->name, __func__, __LINE__,
2717 u16 new_id = cmp_cntxt;
2718 pr_err("(%s): Completion mismatch "
2719 "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2720 skd_name(skdev), req_id,
2727 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2729 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2730 pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2731 skdev->name, __func__, __LINE__,
2733 /* a previously timed out command can
2734 * now be cleaned up */
2735 skd_release_skreq(skdev, skreq);
2736 continue;
2739 skreq->completion = *skcmp;
2740 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2741 skreq->err_info = *skerr;
2742 skd_log_check_status(skdev, cmp_status, skerr->key,
2743 skerr->code, skerr->qual,
2744 skerr->fruc);
2746 /* Release DMA resources for the request. */
2747 if (skreq->n_sg > 0)
2748 skd_postop_sg_list(skdev, skreq);
2750 if (!skreq->req) {
2751 pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2752 "req=0x%x req_id=0x%x\n",
2753 skdev->name, __func__, __LINE__,
2754 skreq, skreq->id, req_id);
2757 * Capture the outcome and post it back to the
2758 * native request.
2760 if (likely(cmp_status == SAM_STAT_GOOD))
2761 skd_end_request(skdev, skreq, 0);
2762 else
2763 skd_resolve_req_exception(skdev, skreq);
2767 * Release the skreq, its FIT msg (if one), timeout slot,
2768 * and queue depth.
2770 skd_release_skreq(skdev, skreq);
2772 /* skd_isr_comp_limit equal to zero means no limit */
2773 if (limit) {
2774 if (++processed >= limit) {
2781 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2782 && (skdev->in_flight) == 0) {
2783 skdev->state = SKD_DRVR_STATE_PAUSED;
2784 wake_up_interruptible(&skdev->waitq);
2790 static void skd_complete_other(struct skd_device *skdev,
2791 volatile struct fit_completion_entry_v1 *skcomp,
2792 volatile struct fit_comp_error_info *skerr)
2797 struct skd_special_context *skspcl;
2799 req_id = skcomp->tag;
2800 req_table = req_id & SKD_ID_TABLE_MASK;
2801 req_slot = req_id & SKD_ID_SLOT_MASK;
2803 pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2804 skdev->name, __func__, __LINE__,
2805 req_table, req_id, req_slot);
2808 * Based on the request id, determine how to dispatch this completion.
2809 * This switch/case is finding the good cases and forwarding the
2810 * completion entry. Errors are reported below the switch.
2812 switch (req_table) {
2813 case SKD_ID_RW_REQUEST:
2815 * The caller, skd_completion_posted_isr() above,
2816 * handles r/w requests. The only way we get here
2817 * is if the req_slot is out of bounds.
2818 */
2819 break;
2821 case SKD_ID_SPECIAL_REQUEST:
2823 * Make sure the req_slot is in bounds and that the id
2826 if (req_slot < skdev->n_special) {
2827 skspcl = &skdev->skspcl_table[req_slot];
2828 if (skspcl->req.id == req_id &&
2829 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2830 skd_complete_special(skdev,
2831 skcomp, skerr, skspcl);
2832 return;
2837 case SKD_ID_INTERNAL:
2838 if (req_slot == 0) {
2839 skspcl = &skdev->internal_skspcl;
2840 if (skspcl->req.id == req_id &&
2841 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2842 skd_complete_internal(skdev,
2843 skcomp, skerr, skspcl);
2844 return;
2849 case SKD_ID_FIT_MSG:
2851 * These ids should never appear in a completion record.
2857 * These ids should never appear anywhere;
2863 * If we get here it is a bad or stale id.
2867 static void skd_complete_special(struct skd_device *skdev,
2868 volatile struct fit_completion_entry_v1
2869 *skcomp,
2870 volatile struct fit_comp_error_info *skerr,
2871 struct skd_special_context *skspcl)
2873 pr_debug("%s:%s:%d completing special request %p\n",
2874 skdev->name, __func__, __LINE__, skspcl);
2875 if (skspcl->orphaned) {
2876 /* Discard orphaned request */
2877 /* ?: Can this release directly or does it need
2878 * to use a worker? */
2879 pr_debug("%s:%s:%d release orphaned %p\n",
2880 skdev->name, __func__, __LINE__, skspcl);
2881 skd_release_special(skdev, skspcl);
2882 return;
2885 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2887 skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2888 skspcl->req.completion = *skcomp;
2889 skspcl->req.err_info = *skerr;
2891 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2892 skerr->code, skerr->qual, skerr->fruc);
2894 wake_up_interruptible(&skdev->waitq);
2897 /* assume spinlock is already held */
2898 static void skd_release_special(struct skd_device *skdev,
2899 struct skd_special_context *skspcl)
2901 int i, was_depleted;
2903 for (i = 0; i < skspcl->req.n_sg; i++) {
2904 struct page *page = sg_page(&skspcl->req.sg[i]);
2905 __free_page(page);
2908 was_depleted = (skdev->skspcl_free_list == NULL);
2910 skspcl->req.state = SKD_REQ_STATE_IDLE;
2911 skspcl->req.id += SKD_ID_INCR;
2912 skspcl->req.next =
2913 (struct skd_request_context *)skdev->skspcl_free_list;
2914 skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2916 if (was_depleted) {
2917 pr_debug("%s:%s:%d skspcl was depleted\n",
2918 skdev->name, __func__, __LINE__);
2919 /* Free list was depleted. There might be waiters. */
2920 wake_up_interruptible(&skdev->waitq);
2924 static void skd_reset_skcomp(struct skd_device *skdev)
2927 struct fit_completion_entry_v1 *skcomp;
2929 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2930 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2932 memset(skdev->skcomp_table, 0, nbytes);
2934 skdev->skcomp_ix = 0;
2935 skdev->skcomp_cycle = 1;
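/* After a reset the device posts its first entries at index 0 with
 * cycle 1, matching the cycle check in skd_isr_completion_posted(). */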
2939 *****************************************************************************
2940 * INTERRUPTS
2941 *****************************************************************************
2943 static void skd_completion_worker(struct work_struct *work)
2945 struct skd_device *skdev =
2946 container_of(work, struct skd_device, completion_worker);
2947 unsigned long flags;
2948 int flush_enqueued = 0;
2950 spin_lock_irqsave(&skdev->lock, flags);
2953 * pass in limit=0, which means no limit;
2954 * process everything in the compq
2956 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2957 skd_request_fn(skdev->queue);
2959 spin_unlock_irqrestore(&skdev->lock, flags);
2962 static void skd_isr_msg_from_dev(struct skd_device *skdev);
2963 static void skd_isr_fwstate(struct skd_device *skdev);
2964 static irqreturn_t
2965 skd_isr(int irq, void *ptr)
2967 struct skd_device *skdev;
2972 int flush_enqueued = 0;
2974 skdev = (struct skd_device *)ptr;
2975 spin_lock(&skdev->lock);
2978 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2980 ack = FIT_INT_DEF_MASK;
2981 ack &= intstat;
2983 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
2984 skdev->name, __func__, __LINE__, intstat, ack);
2986 /* As long as there is an int pending on device, keep
2987 * running loop. When none, get out, but if we've never
2988 * done any processing, call completion handler?
2991 /* No interrupts on device, but run the completion
2995 if (likely (skdev->state
2996 == SKD_DRVR_STATE_ONLINE))
3003 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
3005 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
3006 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
3007 if (intstat & FIT_ISH_COMPLETION_POSTED) {
3009 * If we have already deferred completion
3010 * processing, don't bother running it again
3012 if (deferred == 0)
3013 deferred =
3014 skd_isr_completion_posted(skdev,
3015 skd_isr_comp_limit, &flush_enqueued);
3018 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
3019 skd_isr_fwstate(skdev);
3020 if (skdev->state == SKD_DRVR_STATE_FAULT ||
3021 skdev->state ==
3022 SKD_DRVR_STATE_DISAPPEARED) {
3023 spin_unlock(&skdev->lock);
3028 if (intstat & FIT_ISH_MSG_FROM_DEV)
3029 skd_isr_msg_from_dev(skdev);
3033 if (unlikely(flush_enqueued))
3034 skd_request_fn(skdev->queue);
3036 if (deferred)
3037 schedule_work(&skdev->completion_worker);
3038 else if (!flush_enqueued)
3039 skd_request_fn(skdev->queue);
3041 spin_unlock(&skdev->lock);
3046 static void skd_drive_fault(struct skd_device *skdev)
3048 skdev->state = SKD_DRVR_STATE_FAULT;
3049 pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3052 static void skd_drive_disappeared(struct skd_device *skdev)
3054 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3055 pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3058 static void skd_isr_fwstate(struct skd_device *skdev)
3063 int prev_driver_state = skdev->state;
3065 sense = SKD_READL(skdev, FIT_STATUS);
3066 state = sense & FIT_SR_DRIVE_STATE_MASK;
3068 pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3069 skd_name(skdev),
3070 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3071 skd_drive_state_to_str(state), state);
3073 skdev->drive_state = state;
3075 switch (skdev->drive_state) {
3076 case FIT_SR_DRIVE_INIT:
3077 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3078 skd_disable_interrupts(skdev);
3079 break;
3081 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3082 skd_recover_requests(skdev, 0);
3083 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3084 skdev->timer_countdown = SKD_STARTING_TIMO;
3085 skdev->state = SKD_DRVR_STATE_STARTING;
3086 skd_soft_reset(skdev);
3087 break;
3088 }
3089 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3090 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3091 skdev->last_mtd = mtd;
3092 break;
3094 case FIT_SR_DRIVE_ONLINE:
3095 skdev->cur_max_queue_depth = skd_max_queue_depth;
3096 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3097 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3099 skdev->queue_low_water_mark =
3100 skdev->cur_max_queue_depth * 2 / 3 + 1;
3101 if (skdev->queue_low_water_mark < 1)
3102 skdev->queue_low_water_mark = 1;
3104 "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3106 skdev->cur_max_queue_depth,
3107 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3109 skd_refresh_device_data(skdev);
3110 break;
3112 case FIT_SR_DRIVE_BUSY:
3113 skdev->state = SKD_DRVR_STATE_BUSY;
3114 skdev->timer_countdown = SKD_BUSY_TIMO;
3115 skd_quiesce_dev(skdev);
3116 break;
3117 case FIT_SR_DRIVE_BUSY_SANITIZE:
3118 /* set timer for 3 seconds, we'll abort any unfinished
3119 * commands after that expires
3121 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3122 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3123 blk_start_queue(skdev->queue);
3124 break;
3125 case FIT_SR_DRIVE_BUSY_ERASE:
3126 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3127 skdev->timer_countdown = SKD_BUSY_TIMO;
3128 break;
3129 case FIT_SR_DRIVE_OFFLINE:
3130 skdev->state = SKD_DRVR_STATE_IDLE;
3131 break;
3132 case FIT_SR_DRIVE_SOFT_RESET:
3133 switch (skdev->state) {
3134 case SKD_DRVR_STATE_STARTING:
3135 case SKD_DRVR_STATE_RESTARTING:
3136 /* Expected by a caller of skd_soft_reset() */
3137 break;
3138 default:
3139 skdev->state = SKD_DRVR_STATE_RESTARTING;
3140 break;
3141 }
3142 break;
3143 case FIT_SR_DRIVE_FW_BOOTING:
3144 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3145 skdev->name, __func__, __LINE__, skdev->name);
3146 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3147 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3148 break;
3150 case FIT_SR_DRIVE_DEGRADED:
3151 case FIT_SR_PCIE_LINK_DOWN:
3152 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3153 break;
3155 case FIT_SR_DRIVE_FAULT:
3156 skd_drive_fault(skdev);
3157 skd_recover_requests(skdev, 0);
3158 blk_start_queue(skdev->queue);
3159 break;
3161 /* PCIe bus returned all Fs? */
3162 case 0xFF:
3163 pr_info("(%s): state=0x%x sense=0x%x\n",
3164 skd_name(skdev), state, sense);
3165 skd_drive_disappeared(skdev);
3166 skd_recover_requests(skdev, 0);
3167 blk_start_queue(skdev->queue);
3168 break;
3169 default:
3171 * Unknown FW State. Wait for a state we recognize.
3175 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3176 skd_name(skdev),
3177 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3178 skd_skdev_state_to_str(skdev->state), skdev->state);
3181 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3185 for (i = 0; i < skdev->num_req_context; i++) {
3186 struct skd_request_context *skreq = &skdev->skreq_table[i];
3188 if (skreq->state == SKD_REQ_STATE_BUSY) {
3189 skd_log_skreq(skdev, skreq, "recover");
3191 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3192 SKD_ASSERT(skreq->req != NULL);
3194 /* Release DMA resources for the request. */
3195 if (skreq->n_sg > 0)
3196 skd_postop_sg_list(skdev, skreq);
3198 if (requeue &&
3199 (unsigned long) ++skreq->req->special <
3200 SKD_MAX_RETRIES)
3201 blk_requeue_request(skdev->queue, skreq->req);
3202 else
3203 skd_end_request(skdev, skreq, -EIO);
3207 skreq->state = SKD_REQ_STATE_IDLE;
3208 skreq->id += SKD_ID_INCR;
3210 if (i > 0)
3211 skreq[-1].next = skreq;
3214 skdev->skreq_free_list = skdev->skreq_table;
3216 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3217 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3219 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3220 skd_log_skmsg(skdev, skmsg, "salvaged");
3221 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3222 skmsg->state = SKD_MSG_STATE_IDLE;
3223 skmsg->id += SKD_ID_INCR;
3225 if (i > 0)
3226 skmsg[-1].next = skmsg;
3229 skdev->skmsg_free_list = skdev->skmsg_table;
3231 for (i = 0; i < skdev->n_special; i++) {
3232 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3234 /* If orphaned, reclaim it because it has already been reported
3235 * to the process as an error (it was just waiting for
3236 * a completion that didn't come, and now it will never come).
3237 * If busy, change to a state that will cause it to error
3238 * out in the wait routine and let it do the normal
3239 * reporting and reclaiming
3241 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3242 if (skspcl->orphaned) {
3243 pr_debug("%s:%s:%d orphaned %p\n",
3244 skdev->name, __func__, __LINE__,
3246 skd_release_special(skdev, skspcl);
3248 pr_debug("%s:%s:%d not orphaned %p\n",
3249 skdev->name, __func__, __LINE__,
3251 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3255 skdev->skspcl_free_list = skdev->skspcl_table;
3257 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3258 skdev->timeout_slot[i] = 0;
3260 skdev->in_flight = 0;
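/* Every request, FIT msg, and special context is now back on its free
 * list in table order, with the in-flight and timeout-slot counters
 * zeroed. */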
3263 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3269 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3271 pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3272 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3274 /* ignore any mtd that is an ack for something we didn't send */
3275 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3276 return;
3278 switch (FIT_MXD_TYPE(mfd)) {
3279 case FIT_MTD_FITFW_INIT:
3280 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3282 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3283 pr_err("(%s): protocol mismatch\n",
3284 skdev->name);
3285 pr_err("(%s): got=%d support=%d\n",
3286 skdev->name, skdev->proto_ver,
3287 FIT_PROTOCOL_VERSION_1);
3288 pr_err("(%s): please upgrade driver\n",
3289 skdev->name);
3290 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3291 skd_soft_reset(skdev);
3292 break;
3294 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3295 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3296 skdev->last_mtd = mtd;
3297 break;
3299 case FIT_MTD_GET_CMDQ_DEPTH:
3300 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3301 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3302 SKD_N_COMPLETION_ENTRY);
3303 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3304 skdev->last_mtd = mtd;
3305 break;
3307 case FIT_MTD_SET_COMPQ_DEPTH:
3308 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3309 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3310 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3311 skdev->last_mtd = mtd;
3312 break;
3314 case FIT_MTD_SET_COMPQ_ADDR:
3315 skd_reset_skcomp(skdev);
3316 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3317 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3318 skdev->last_mtd = mtd;
3319 break;
3321 case FIT_MTD_CMD_LOG_HOST_ID:
3322 skdev->connect_time_stamp = get_seconds();
3323 data = skdev->connect_time_stamp & 0xFFFF;
3324 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3325 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3326 skdev->last_mtd = mtd;
3327 break;
3329 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3330 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3331 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3332 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3333 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3334 skdev->last_mtd = mtd;
3335 break;
3337 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3338 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3339 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3340 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3341 skdev->last_mtd = mtd;
3343 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3344 skd_name(skdev),
3345 skdev->connect_time_stamp, skdev->drive_jiffies);
3346 break;
3348 case FIT_MTD_ARM_QUEUE:
3349 skdev->last_mtd = 0;
3351 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3360 static void skd_disable_interrupts(struct skd_device *skdev)
3364 sense = SKD_READL(skdev, FIT_CONTROL);
3365 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3366 SKD_WRITEL(skdev, sense, FIT_CONTROL);
3367 pr_debug("%s:%s:%d sense 0x%x\n",
3368 skdev->name, __func__, __LINE__, sense);
3370 /* Note that all 1 bits are written. A 1-bit means
3371 * disable, a 0 means enable.
3373 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3376 static void skd_enable_interrupts(struct skd_device *skdev)
3380 /* unmask interrupts first */
3381 val = FIT_ISH_FW_STATE_CHANGE +
3382 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3384 /* Note that the complement of mask is written. A 1-bit means
3385 * disable, a 0 means enable. */
3386 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3387 pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3388 skdev->name, __func__, __LINE__, ~val);
3390 val = SKD_READL(skdev, FIT_CONTROL);
3391 val |= FIT_CR_ENABLE_INTERRUPTS;
3392 pr_debug("%s:%s:%d control=0x%x\n",
3393 skdev->name, __func__, __LINE__, val);
3394 SKD_WRITEL(skdev, val, FIT_CONTROL);
3398 *****************************************************************************
3399 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3400 *****************************************************************************
3403 static void skd_soft_reset(struct skd_device *skdev)
3407 val = SKD_READL(skdev, FIT_CONTROL);
3408 val |= (FIT_CR_SOFT_RESET);
3409 pr_debug("%s:%s:%d control=0x%x\n",
3410 skdev->name, __func__, __LINE__, val);
3411 SKD_WRITEL(skdev, val, FIT_CONTROL);
3414 static void skd_start_device(struct skd_device *skdev)
3416 unsigned long flags;
3417 u32 sense;
3418 u32 state;
3420 spin_lock_irqsave(&skdev->lock, flags);
3422 /* ack all ghost interrupts */
3423 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3425 sense = SKD_READL(skdev, FIT_STATUS);
3427 pr_debug("%s:%s:%d initial status=0x%x\n",
3428 skdev->name, __func__, __LINE__, sense);
3430 state = sense & FIT_SR_DRIVE_STATE_MASK;
3431 skdev->drive_state = state;
3432 skdev->last_mtd = 0;
3434 skdev->state = SKD_DRVR_STATE_STARTING;
3435 skdev->timer_countdown = SKD_STARTING_TIMO;
3437 skd_enable_interrupts(skdev);
3439 switch (skdev->drive_state) {
3440 case FIT_SR_DRIVE_OFFLINE:
3441 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3442 break;
3444 case FIT_SR_DRIVE_FW_BOOTING:
3445 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3446 skdev->name, __func__, __LINE__, skdev->name);
3447 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3448 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3449 break;
3451 case FIT_SR_DRIVE_BUSY_SANITIZE:
3452 pr_info("(%s): Start: BUSY_SANITIZE\n",
3453 skd_name(skdev));
3454 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3455 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3456 break;
3458 case FIT_SR_DRIVE_BUSY_ERASE:
3459 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3460 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3461 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3462 break;
3464 case FIT_SR_DRIVE_INIT:
3465 case FIT_SR_DRIVE_ONLINE:
3466 skd_soft_reset(skdev);
3467 break;
3469 case FIT_SR_DRIVE_BUSY:
3470 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3471 skdev->state = SKD_DRVR_STATE_BUSY;
3472 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3473 break;
3475 case FIT_SR_DRIVE_SOFT_RESET:
3476 pr_err("(%s) drive soft reset in prog\n",
3477 skd_name(skdev));
3478 break;
3480 case FIT_SR_DRIVE_FAULT:
3481 /* Fault state is bad...soft reset won't do it...
3482 * Hard reset, maybe, but does it work on device?
3483 * For now, just fault so the system doesn't hang.
3485 skd_drive_fault(skdev);
3486 /*start the queue so we can respond with error to requests */
3487 pr_debug("%s:%s:%d starting %s queue\n",
3488 skdev->name, __func__, __LINE__, skdev->name);
3489 blk_start_queue(skdev->queue);
3490 skdev->gendisk_on = -1;
3491 wake_up_interruptible(&skdev->waitq);
3492 break;
3494 case 0xFF:
3495 /* Most likely the device isn't there or isn't responding
3496 * to the BAR1 addresses. */
3497 skd_drive_disappeared(skdev);
3498 /*start the queue so we can respond with error to requests */
3499 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3500 skdev->name, __func__, __LINE__, skdev->name);
3501 blk_start_queue(skdev->queue);
3502 skdev->gendisk_on = -1;
3503 wake_up_interruptible(&skdev->waitq);
3504 break;
3506 default:
3507 pr_err("(%s) Start: unknown state %x\n",
3508 skd_name(skdev), skdev->drive_state);
3512 state = SKD_READL(skdev, FIT_CONTROL);
3513 pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3514 skdev->name, __func__, __LINE__, state);
3516 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3517 pr_debug("%s:%s:%d Intr Status=0x%x\n",
3518 skdev->name, __func__, __LINE__, state);
3520 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3521 pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3522 skdev->name, __func__, __LINE__, state);
3524 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3525 pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3526 skdev->name, __func__, __LINE__, state);
3528 state = SKD_READL(skdev, FIT_HW_VERSION);
3529 pr_debug("%s:%s:%d HW version=0x%x\n",
3530 skdev->name, __func__, __LINE__, state);
3532 spin_unlock_irqrestore(&skdev->lock, flags);
3535 static void skd_stop_device(struct skd_device *skdev)
3537 unsigned long flags;
3538 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3539 u32 dev_state;
3540 int i;
3542 spin_lock_irqsave(&skdev->lock, flags);
3544 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3545 pr_err("(%s): skd_stop_device not online no sync\n",
3546 skd_name(skdev));
3550 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3551 pr_err("(%s): skd_stop_device no special\n",
3552 skd_name(skdev));
3556 skdev->state = SKD_DRVR_STATE_SYNCING;
3557 skdev->sync_done = 0;
3559 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3561 spin_unlock_irqrestore(&skdev->lock, flags);
3563 wait_event_interruptible_timeout(skdev->waitq,
3564 (skdev->sync_done), (10 * HZ));
3566 spin_lock_irqsave(&skdev->lock, flags);
3568 switch (skdev->sync_done) {
3569 case 0:
3570 pr_err("(%s): skd_stop_device no sync\n",
3571 skd_name(skdev));
3572 break;
3573 case 1:
3574 pr_err("(%s): skd_stop_device sync done\n",
3575 skd_name(skdev));
3576 break;
3577 default:
3578 pr_err("(%s): skd_stop_device sync error\n",
3579 skd_name(skdev));
3583 skdev->state = SKD_DRVR_STATE_STOPPING;
3584 spin_unlock_irqrestore(&skdev->lock, flags);
3586 skd_kill_timer(skdev);
3588 spin_lock_irqsave(&skdev->lock, flags);
3589 skd_disable_interrupts(skdev);
3591 /* ensure all ints on device are cleared */
3592 /* soft reset the device to unload with a clean slate */
3593 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3594 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3596 spin_unlock_irqrestore(&skdev->lock, flags);
3598 /* poll every 100ms, 1 second timeout */
3599 for (i = 0; i < 10; i++) {
3600 dev_state =
3601 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3602 if (dev_state == FIT_SR_DRIVE_INIT)
3603 break;
3604 set_current_state(TASK_INTERRUPTIBLE);
3605 schedule_timeout(msecs_to_jiffies(100));
3608 if (dev_state != FIT_SR_DRIVE_INIT)
3609 pr_err("(%s): skd_stop_device state error 0x%02x\n",
3610 skd_name(skdev), dev_state);
3613 /* assume spinlock is held */
3614 static void skd_restart_device(struct skd_device *skdev)
3616 u32 state;
3618 /* ack all ghost interrupts */
3619 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3621 state = SKD_READL(skdev, FIT_STATUS);
3623 pr_debug("%s:%s:%d drive status=0x%x\n",
3624 skdev->name, __func__, __LINE__, state);
3626 state &= FIT_SR_DRIVE_STATE_MASK;
3627 skdev->drive_state = state;
3628 skdev->last_mtd = 0;
3630 skdev->state = SKD_DRVR_STATE_RESTARTING;
3631 skdev->timer_countdown = SKD_RESTARTING_TIMO;
3633 skd_soft_reset(skdev);
3636 /* assume spinlock is held */
3637 static int skd_quiesce_dev(struct skd_device *skdev)
3639 int rc = 0;
3641 switch (skdev->state) {
3642 case SKD_DRVR_STATE_BUSY:
3643 case SKD_DRVR_STATE_BUSY_IMMINENT:
3644 pr_debug("%s:%s:%d stopping %s queue\n",
3645 skdev->name, __func__, __LINE__, skdev->name);
3646 blk_stop_queue(skdev->queue);
3647 break;
3648 case SKD_DRVR_STATE_ONLINE:
3649 case SKD_DRVR_STATE_STOPPING:
3650 case SKD_DRVR_STATE_SYNCING:
3651 case SKD_DRVR_STATE_PAUSING:
3652 case SKD_DRVR_STATE_PAUSED:
3653 case SKD_DRVR_STATE_STARTING:
3654 case SKD_DRVR_STATE_RESTARTING:
3655 case SKD_DRVR_STATE_RESUMING:
3656 default:
3657 rc = -EINVAL;
3658 pr_debug("%s:%s:%d state [%d] not implemented\n",
3659 skdev->name, __func__, __LINE__, skdev->state);
3664 /* assume spinlock is held */
3665 static int skd_unquiesce_dev(struct skd_device *skdev)
3667 int prev_driver_state = skdev->state;
3669 skd_log_skdev(skdev, "unquiesce");
3670 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3671 pr_debug("%s:%s:%d **** device already ONLINE\n",
3672 skdev->name, __func__, __LINE__);
3673 return 0;
3675 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3677 * If there has been a state change to other than
3678 * ONLINE, we will rely on controller state change
3679 * to come back online and restart the queue.
3680 * The BUSY state means that driver is ready to
3681 * continue normal processing but waiting for controller
3682 * to become available.
3684 skdev->state = SKD_DRVR_STATE_BUSY;
3685 pr_debug("%s:%s:%d drive BUSY state\n",
3686 skdev->name, __func__, __LINE__);
3687 return 0;
3691 * Drive has just come online, driver is either in startup,
3692 * paused performing a task, or busy waiting for hardware.
3694 switch (skdev->state) {
3695 case SKD_DRVR_STATE_PAUSED:
3696 case SKD_DRVR_STATE_BUSY:
3697 case SKD_DRVR_STATE_BUSY_IMMINENT:
3698 case SKD_DRVR_STATE_BUSY_ERASE:
3699 case SKD_DRVR_STATE_STARTING:
3700 case SKD_DRVR_STATE_RESTARTING:
3701 case SKD_DRVR_STATE_FAULT:
3702 case SKD_DRVR_STATE_IDLE:
3703 case SKD_DRVR_STATE_LOAD:
3704 skdev->state = SKD_DRVR_STATE_ONLINE;
3705 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3706 skd_name(skdev),
3707 skd_skdev_state_to_str(prev_driver_state),
3708 prev_driver_state, skd_skdev_state_to_str(skdev->state),
3710 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3711 skdev->name, __func__, __LINE__);
3712 pr_debug("%s:%s:%d starting %s queue\n",
3713 skdev->name, __func__, __LINE__, skdev->name);
3714 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3715 blk_start_queue(skdev->queue);
3716 skdev->gendisk_on = 1;
3717 wake_up_interruptible(&skdev->waitq);
3718 break;
3720 case SKD_DRVR_STATE_DISAPPEARED:
3721 default:
3722 pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
3723 skdev->name, __func__, __LINE__,
3731 *****************************************************************************
3732 * PCIe MSI/MSI-X INTERRUPT HANDLERS
3733 *****************************************************************************
3736 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3738 struct skd_device *skdev = skd_host_data;
3739 unsigned long flags;
3741 spin_lock_irqsave(&skdev->lock, flags);
3742 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3743 skdev->name, __func__, __LINE__,
3744 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3745 pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3746 irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3747 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3748 spin_unlock_irqrestore(&skdev->lock, flags);
3749 return IRQ_HANDLED;
3752 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3754 struct skd_device *skdev = skd_host_data;
3755 unsigned long flags;
3757 spin_lock_irqsave(&skdev->lock, flags);
3758 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3759 skdev->name, __func__, __LINE__,
3760 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3761 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3762 skd_isr_fwstate(skdev);
3763 spin_unlock_irqrestore(&skdev->lock, flags);
3764 return IRQ_HANDLED;
3767 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3769 struct skd_device *skdev = skd_host_data;
3770 unsigned long flags;
3771 int flush_enqueued = 0;
3772 int deferred;
3774 spin_lock_irqsave(&skdev->lock, flags);
3775 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3776 skdev->name, __func__, __LINE__,
3777 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3778 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3779 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3780 &flush_enqueued);
3781 if (flush_enqueued)
3782 skd_request_fn(skdev->queue);
3784 if (deferred)
3785 schedule_work(&skdev->completion_worker);
3786 else if (!flush_enqueued)
3787 skd_request_fn(skdev->queue);
3789 spin_unlock_irqrestore(&skdev->lock, flags);
3790 return IRQ_HANDLED;
3794 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3796 struct skd_device *skdev = skd_host_data;
3797 unsigned long flags;
3799 spin_lock_irqsave(&skdev->lock, flags);
3800 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3801 skdev->name, __func__, __LINE__,
3802 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3803 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3804 skd_isr_msg_from_dev(skdev);
3805 spin_unlock_irqrestore(&skdev->lock, flags);
3806 return IRQ_HANDLED;
3809 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3811 struct skd_device *skdev = skd_host_data;
3812 unsigned long flags;
3814 spin_lock_irqsave(&skdev->lock, flags);
3815 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3816 skdev->name, __func__, __LINE__,
3817 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3818 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3819 spin_unlock_irqrestore(&skdev->lock, flags);
3820 return IRQ_HANDLED;
3824 *****************************************************************************
3825 * PCIe MSI/MSI-X SETUP
3826 *****************************************************************************
3829 struct skd_msix_entry {
3830 int have_irq;
3831 u32 vector;
3832 u32 entry;
3833 struct skd_device *rsp;
3834 char isr_name[30];
3837 struct skd_init_msix_entry {
3838 char *name;
3839 irq_handler_t handler;
3842 #define SKD_MAX_MSIX_COUNT 13
3843 #define SKD_MIN_MSIX_COUNT 7
3844 #define SKD_BASE_MSIX_IRQ 4
3846 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3847 { "(DMA 0)", skd_reserved_isr },
3848 { "(DMA 1)", skd_reserved_isr },
3849 { "(DMA 2)", skd_reserved_isr },
3850 { "(DMA 3)", skd_reserved_isr },
3851 { "(State Change)", skd_statec_isr },
3852 { "(COMPL_Q)", skd_comp_q },
3853 { "(MSG)", skd_msg_isr },
3854 { "(Reserved)", skd_reserved_isr },
3855 { "(Reserved)", skd_reserved_isr },
3856 { "(Queue Full 0)", skd_qfull_isr },
3857 { "(Queue Full 1)", skd_qfull_isr },
3858 { "(Queue Full 2)", skd_qfull_isr },
3859 { "(Queue Full 3)", skd_qfull_isr },
3862 static void skd_release_msix(struct skd_device *skdev)
3864 struct skd_msix_entry *qentry;
3867 if (skdev->msix_entries) {
3868 for (i = 0; i < skdev->msix_count; i++) {
3869 qentry = &skdev->msix_entries[i];
3870 skdev = qentry->rsp;
3872 if (qentry->have_irq)
3873 devm_free_irq(&skdev->pdev->dev,
3874 qentry->vector, qentry->rsp);
3877 kfree(skdev->msix_entries);
3880 if (skdev->msix_count)
3881 pci_disable_msix(skdev->pdev);
3883 skdev->msix_count = 0;
3884 skdev->msix_entries = NULL;
3887 static int skd_acquire_msix(struct skd_device *skdev)
3889 int i, rc;
3890 struct pci_dev *pdev = skdev->pdev;
3891 struct msix_entry *entries;
3892 struct skd_msix_entry *qentry;
3894 entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
3895 GFP_KERNEL);
3896 if (!entries)
3897 return -ENOMEM;
3899 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
3900 entries[i].entry = i;
3902 rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
3904 pr_err("(%s): failed to enable MSI-X %d\n",
3905 skd_name(skdev), rc);
3909 skdev->msix_count = SKD_MAX_MSIX_COUNT;
3910 skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
3911 skdev->msix_count, GFP_KERNEL);
3912 if (!skdev->msix_entries) {
3914 pr_err("(%s): msix table allocation error\n",
3915 skd_name(skdev));
3919 for (i = 0; i < skdev->msix_count; i++) {
3920 qentry = &skdev->msix_entries[i];
3921 qentry->vector = entries[i].vector;
3922 qentry->entry = entries[i].entry;
3924 qentry->have_irq = 0;
3925 pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
3926 skdev->name, __func__, __LINE__,
3927 pci_name(pdev), skdev->name,
3928 i, qentry->vector, qentry->entry);
3931 /* Enable MSI-X vectors for the base queue */
3932 for (i = 0; i < skdev->msix_count; i++) {
3933 qentry = &skdev->msix_entries[i];
3934 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3935 "%s%d-msix %s", DRV_NAME, skdev->devno,
3936 msix_entries[i].name);
3937 rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
3938 msix_entries[i].handler, 0,
3939 qentry->isr_name, skdev);
3941 pr_err("(%s): Unable to register(%d) MSI-X "
3942 "handler %d %s\n",
3943 skd_name(skdev), rc, i, qentry->isr_name);
3946 qentry->have_irq = 1;
3947 qentry->rsp = skdev;
3950 pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
3951 skdev->name, __func__, __LINE__,
3952 pci_name(pdev), skdev->name, skdev->msix_count);
3958 skd_release_msix(skdev);
3962 static int skd_acquire_irq(struct skd_device *skdev)
3964 int rc;
3965 struct pci_dev *pdev;
3967 pdev = skdev->pdev;
3968 skdev->msix_count = 0;
3970 RETRY_IRQ_TYPE:
3971 switch (skdev->irq_type) {
3972 case SKD_IRQ_MSIX:
3973 rc = skd_acquire_msix(skdev);
3974 if (!rc)
3975 pr_info("(%s): MSI-X %d irqs enabled\n",
3976 skd_name(skdev), skdev->msix_count);
3979 "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
3980 skd_name(skdev), rc);
3981 skdev->irq_type = SKD_IRQ_MSI;
3982 goto RETRY_IRQ_TYPE;
3985 case SKD_IRQ_MSI:
3986 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
3987 DRV_NAME, skdev->devno);
3988 rc = pci_enable_msi_range(pdev, 1, 1);
3989 if (rc > 0) {
3990 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
3991 skdev->isr_name, skdev);
3992 if (rc) {
3993 pci_disable_msi(pdev);
3995 "(%s): failed to allocate the MSI interrupt %d\n",
3996 skd_name(skdev), rc);
3997 goto RETRY_IRQ_LEGACY;
3999 pr_info("(%s): MSI irq %d enabled\n",
4000 skd_name(skdev), pdev->irq);
4004 "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
4005 skd_name(skdev), rc);
4006 skdev->irq_type = SKD_IRQ_LEGACY;
4007 goto RETRY_IRQ_TYPE;
4009 RETRY_IRQ_LEGACY:
4010 case SKD_IRQ_LEGACY:
4011 snprintf(skdev->isr_name, sizeof(skdev->isr_name),
4012 "%s%d-legacy", DRV_NAME, skdev->devno);
4013 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
4014 IRQF_SHARED, skdev->isr_name, skdev);
4015 if (!rc)
4016 pr_info("(%s): LEGACY irq %d enabled\n",
4017 skd_name(skdev), pdev->irq);
4018 else
4019 pr_err("(%s): request LEGACY irq error %d\n",
4020 skd_name(skdev), rc);
4021 break;
4022 default:
4023 pr_info("(%s): irq_type %d invalid, re-set to %d\n",
4024 skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
4025 skdev->irq_type = SKD_IRQ_LEGACY;
4026 goto RETRY_IRQ_TYPE;
4031 static void skd_release_irq(struct skd_device *skdev)
4033 switch (skdev->irq_type) {
4034 case SKD_IRQ_MSIX:
4035 skd_release_msix(skdev);
4036 break;
4037 case SKD_IRQ_MSI:
4038 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4039 pci_disable_msi(skdev->pdev);
4040 break;
4041 case SKD_IRQ_LEGACY:
4042 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4043 break;
4044 default:
4045 pr_err("(%s): wrong irq type %d!",
4046 skd_name(skdev), skdev->irq_type);
4052 *****************************************************************************
4053 * CONSTRUCT
4054 *****************************************************************************
4057 static int skd_cons_skcomp(struct skd_device *skdev)
4060 struct fit_completion_entry_v1 *skcomp;
4063 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
4064 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
4066 pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
4067 skdev->name, __func__, __LINE__,
4068 nbytes, SKD_N_COMPLETION_ENTRY);
4070 skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
4071 &skdev->cq_dma_address);
4073 if (skcomp == NULL) {
4078 skdev->skcomp_table = skcomp;
4079 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
4081 SKD_N_COMPLETION_ENTRY);
4087 static int skd_cons_skmsg(struct skd_device *skdev)
4092 pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
4093 skdev->name, __func__, __LINE__,
4094 sizeof(struct skd_fitmsg_context),
4095 skdev->num_fitmsg_context,
4096 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
4098 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4099 *skdev->num_fitmsg_context, GFP_KERNEL);
4100 if (skdev->skmsg_table == NULL) {
4105 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4106 struct skd_fitmsg_context *skmsg;
4108 skmsg = &skdev->skmsg_table[i];
4110 skmsg->id = i + SKD_ID_FIT_MSG;
4112 skmsg->state = SKD_MSG_STATE_IDLE;
4113 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4114 SKD_N_FITMSG_BYTES + 64,
4115 &skmsg->mb_dma_address);
4117 if (skmsg->msg_buf == NULL) {
4122 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4123 (~FIT_QCMD_BASE_ADDRESS_MASK));
4124 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4125 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4126 FIT_QCMD_BASE_ADDRESS_MASK);
4127 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4128 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
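/* Adding ~FIT_QCMD_BASE_ADDRESS_MASK and then masking rounds the
 * buffer and its DMA address up to the next base-address boundary;
 * skmsg->offset keeps the original low bits so skd_free_skmsg() can
 * add them back before freeing. */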
4129 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4131 skmsg->next = &skmsg[1];
4134 /* Free list is in order starting with the 0th entry. */
4135 skdev->skmsg_table[i - 1].next = NULL;
4136 skdev->skmsg_free_list = skdev->skmsg_table;
4142 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4143 u32 n_sg,
4144 dma_addr_t *ret_dma_addr)
4146 struct fit_sg_descriptor *sg_list;
4149 nbytes = sizeof(*sg_list) * n_sg;
4151 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4153 if (sg_list != NULL) {
4154 uint64_t dma_address = *ret_dma_addr;
4157 memset(sg_list, 0, nbytes);
4159 for (i = 0; i < n_sg - 1; i++) {
4161 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4163 sg_list[i].next_desc_ptr = dma_address + ndp_off;
4165 sg_list[i].next_desc_ptr = 0LL;
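/* The descriptors are pre-chained through next_desc_ptr into a single
 * DMA-visible linked list, with the final descriptor's pointer left
 * NULL. */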
4171 static int skd_cons_skreq(struct skd_device *skdev)
4176 pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4177 skdev->name, __func__, __LINE__,
4178 sizeof(struct skd_request_context),
4179 skdev->num_req_context,
4180 sizeof(struct skd_request_context) * skdev->num_req_context);
4182 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4183 * skdev->num_req_context, GFP_KERNEL);
4184 if (skdev->skreq_table == NULL) {
4189 pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4190 skdev->name, __func__, __LINE__,
4191 skdev->sgs_per_request, sizeof(struct scatterlist),
4192 skdev->sgs_per_request * sizeof(struct scatterlist));
4194 for (i = 0; i < skdev->num_req_context; i++) {
4195 struct skd_request_context *skreq;
4197 skreq = &skdev->skreq_table[i];
4199 skreq->id = i + SKD_ID_RW_REQUEST;
4200 skreq->state = SKD_REQ_STATE_IDLE;
4202 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4203 skdev->sgs_per_request, GFP_KERNEL);
4204 if (skreq->sg == NULL) {
4208 sg_init_table(skreq->sg, skdev->sgs_per_request);
4210 skreq->sksg_list = skd_cons_sg_list(skdev,
4211 skdev->sgs_per_request,
4212 &skreq->sksg_dma_address);
4214 if (skreq->sksg_list == NULL) {
4219 skreq->next = &skreq[1];
4222 /* Free list is in order starting with the 0th entry. */
4223 skdev->skreq_table[i - 1].next = NULL;
4224 skdev->skreq_free_list = skdev->skreq_table;
4230 static int skd_cons_skspcl(struct skd_device *skdev)
4235 pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4236 skdev->name, __func__, __LINE__,
4237 sizeof(struct skd_special_context),
4239 sizeof(struct skd_special_context) * skdev->n_special);
4241 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4242 * skdev->n_special, GFP_KERNEL);
4243 if (skdev->skspcl_table == NULL) {
4248 for (i = 0; i < skdev->n_special; i++) {
4249 struct skd_special_context *skspcl;
4251 skspcl = &skdev->skspcl_table[i];
4253 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4254 skspcl->req.state = SKD_REQ_STATE_IDLE;
4256 skspcl->req.next = &skspcl[1].req;
4258 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4260 skspcl->msg_buf =
4261 pci_zalloc_consistent(skdev->pdev, nbytes,
4262 &skspcl->mb_dma_address);
4263 if (skspcl->msg_buf == NULL) {
4268 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4269 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4270 if (skspcl->req.sg == NULL) {
4275 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4276 SKD_N_SG_PER_SPECIAL,
4277 &skspcl->req.sksg_dma_address);
4279 if (skspcl->req.sksg_list == NULL) {
4285 /* Free list is in order starting with the 0th entry. */
4286 skdev->skspcl_table[i - 1].req.next = NULL;
4287 skdev->skspcl_free_list = skdev->skspcl_table;
4295 static int skd_cons_sksb(struct skd_device *skdev)
4298 struct skd_special_context *skspcl;
4301 skspcl = &skdev->internal_skspcl;
4303 skspcl->req.id = 0 + SKD_ID_INTERNAL;
4304 skspcl->req.state = SKD_REQ_STATE_IDLE;
4306 nbytes = SKD_N_INTERNAL_BYTES;
4308 skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4309 &skspcl->db_dma_address);
4310 if (skspcl->data_buf == NULL) {
4315 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4316 skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4317 &skspcl->mb_dma_address);
4318 if (skspcl->msg_buf == NULL) {
4323 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4324 &skspcl->req.sksg_dma_address);
4325 if (skspcl->req.sksg_list == NULL) {
4330 if (!skd_format_internal_skspcl(skdev)) {
4339 static int skd_cons_disk(struct skd_device *skdev)
4342 struct gendisk *disk;
4343 struct request_queue *q;
4344 unsigned long flags;
4346 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4353 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4355 disk->major = skdev->major;
4356 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4357 disk->fops = &skd_blockdev_ops;
4358 disk->private_data = skdev;
4360 q = blk_init_queue(skd_request_fn, &skdev->lock);
4365 skdev->queue = q;
4366 disk->queue = q;
4368 q->queuedata = skdev;
4370 blk_queue_write_cache(q, true, true);
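/* Advertise a volatile write cache plus FUA support so the block
 * layer issues explicit flush/FUA requests instead of assuming
 * write-through behavior. */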
4371 blk_queue_max_segments(q, skdev->sgs_per_request);
4372 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4374 /* set sysfs optimal_io_size to 8K */
4375 blk_queue_io_opt(q, 8192);
4377 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4378 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4380 spin_lock_irqsave(&skdev->lock, flags);
4381 pr_debug("%s:%s:%d stopping %s queue\n",
4382 skdev->name, __func__, __LINE__, skdev->name);
4383 blk_stop_queue(skdev->queue);
4384 spin_unlock_irqrestore(&skdev->lock, flags);
4390 #define SKD_N_DEV_TABLE 16u
4391 static u32 skd_next_devno;
4393 static struct skd_device *skd_construct(struct pci_dev *pdev)
4395 struct skd_device *skdev;
4396 int blk_major = skd_major;
4399 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4401 if (!skdev) {
4402 pr_err(PFX "(%s): memory alloc failure\n",
4403 pci_name(pdev));
4404 return NULL;
4405 }
4407 skdev->state = SKD_DRVR_STATE_LOAD;
4409 skdev->devno = skd_next_devno++;
4410 skdev->major = blk_major;
4411 skdev->irq_type = skd_isr_type;
4412 sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4413 skdev->dev_max_queue_depth = 0;
4415 skdev->num_req_context = skd_max_queue_depth;
4416 skdev->num_fitmsg_context = skd_max_queue_depth;
4417 skdev->n_special = skd_max_pass_thru;
4418 skdev->cur_max_queue_depth = 1;
4419 skdev->queue_low_water_mark = 1;
4420 skdev->proto_ver = 99;
4421 skdev->sgs_per_request = skd_sgs_per_request;
4422 skdev->dbg_level = skd_dbg_level;
4424 atomic_set(&skdev->device_count, 0);
4426 spin_lock_init(&skdev->lock);
4428 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4430 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4431 rc = skd_cons_skcomp(skdev);
4432 if (rc < 0)
4433 goto err_out;
4435 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4436 rc = skd_cons_skmsg(skdev);
4437 if (rc < 0)
4438 goto err_out;
4440 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4441 rc = skd_cons_skreq(skdev);
4442 if (rc < 0)
4443 goto err_out;
4445 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4446 rc = skd_cons_skspcl(skdev);
4447 if (rc < 0)
4448 goto err_out;
4450 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4451 rc = skd_cons_sksb(skdev);
4452 if (rc < 0)
4453 goto err_out;
4455 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4456 rc = skd_cons_disk(skdev);
4457 if (rc < 0)
4458 goto err_out;
4460 pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4461 return skdev;
4463 err_out:
4464 pr_debug("%s:%s:%d construct failed\n",
4465 skdev->name, __func__, __LINE__);
4466 skd_destruct(skdev);
4467 return NULL;
4471 *****************************************************************************
4472 * DESTRUCT (FREE)
4473 *****************************************************************************
4476 static void skd_free_skcomp(struct skd_device *skdev)
4478 if (skdev->skcomp_table != NULL) {
4481 nbytes = sizeof(skdev->skcomp_table[0]) *
4482 SKD_N_COMPLETION_ENTRY;
4483 pci_free_consistent(skdev->pdev, nbytes,
4484 skdev->skcomp_table, skdev->cq_dma_address);
4487 skdev->skcomp_table = NULL;
4488 skdev->cq_dma_address = 0;
4491 static void skd_free_skmsg(struct skd_device *skdev)
4495 if (skdev->skmsg_table == NULL)
4498 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4499 struct skd_fitmsg_context *skmsg;
4501 skmsg = &skdev->skmsg_table[i];
4503 if (skmsg->msg_buf != NULL) {
4504 skmsg->msg_buf += skmsg->offset;
4505 skmsg->mb_dma_address += skmsg->offset;
4506 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4507 skmsg->msg_buf,
4508 skmsg->mb_dma_address);
4510 skmsg->msg_buf = NULL;
4511 skmsg->mb_dma_address = 0;
4514 kfree(skdev->skmsg_table);
4515 skdev->skmsg_table = NULL;
4518 static void skd_free_sg_list(struct skd_device *skdev,
4519 struct fit_sg_descriptor *sg_list,
4520 u32 n_sg, dma_addr_t dma_addr)
4522 if (sg_list != NULL) {
4525 nbytes = sizeof(*sg_list) * n_sg;
4527 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4531 static void skd_free_skreq(struct skd_device *skdev)
4535 if (skdev->skreq_table == NULL)
4538 for (i = 0; i < skdev->num_req_context; i++) {
4539 struct skd_request_context *skreq;
4541 skreq = &skdev->skreq_table[i];
4543 skd_free_sg_list(skdev, skreq->sksg_list,
4544 skdev->sgs_per_request,
4545 skreq->sksg_dma_address);
4547 skreq->sksg_list = NULL;
4548 skreq->sksg_dma_address = 0;
4550 kfree(skreq->sg);
4553 kfree(skdev->skreq_table);
4554 skdev->skreq_table = NULL;
4557 static void skd_free_skspcl(struct skd_device *skdev)
4562 if (skdev->skspcl_table == NULL)
4565 for (i = 0; i < skdev->n_special; i++) {
4566 struct skd_special_context *skspcl;
4568 skspcl = &skdev->skspcl_table[i];
4570 if (skspcl->msg_buf != NULL) {
4571 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4572 pci_free_consistent(skdev->pdev, nbytes,
4573 skspcl->msg_buf,
4574 skspcl->mb_dma_address);
4577 skspcl->msg_buf = NULL;
4578 skspcl->mb_dma_address = 0;
4580 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4581 SKD_N_SG_PER_SPECIAL,
4582 skspcl->req.sksg_dma_address);
4584 skspcl->req.sksg_list = NULL;
4585 skspcl->req.sksg_dma_address = 0;
4587 kfree(skspcl->req.sg);
4590 kfree(skdev->skspcl_table);
4591 skdev->skspcl_table = NULL;
static void skd_free_sksb(struct skd_device *skdev)
{
	struct skd_special_context *skspcl;
	u32 nbytes;

	skspcl = &skdev->internal_skspcl;

	if (skspcl->data_buf != NULL) {
		nbytes = SKD_N_INTERNAL_BYTES;
		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->data_buf, skspcl->db_dma_address);
	}

	skspcl->data_buf = NULL;
	skspcl->db_dma_address = 0;

	if (skspcl->msg_buf != NULL) {
		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->msg_buf, skspcl->mb_dma_address);
	}

	skspcl->msg_buf = NULL;
	skspcl->mb_dma_address = 0;

	skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
			 skspcl->req.sksg_dma_address);

	skspcl->req.sksg_list = NULL;
	skspcl->req.sksg_dma_address = 0;
}
static void skd_free_disk(struct skd_device *skdev)
{
	struct gendisk *disk = skdev->disk;

	if (disk && (disk->flags & GENHD_FL_UP))
		del_gendisk(disk);
	if (skdev->queue) {
		blk_cleanup_queue(skdev->queue);
		skdev->queue = NULL;
	}
	put_disk(disk);
	skdev->disk = NULL;
}
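
/*
 * Tear down in the reverse of construction order: disk first, then the
 * internal context, special contexts, request and message tables, the
 * completion ring, and finally the skd_device itself.
 */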
static void skd_destruct(struct skd_device *skdev)
{
	if (skdev == NULL)
		return;

	pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
	skd_free_disk(skdev);

	pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
	skd_free_sksb(skdev);

	pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
	skd_free_skspcl(skdev);

	pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
	skd_free_skreq(skdev);

	pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
	skd_free_skmsg(skdev);

	pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
	skd_free_skcomp(skdev);

	pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
	kfree(skdev);
}
/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */
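
/*
 * Legacy HDIO_GETGEO support: the SSD has no real CHS geometry, so a
 * conventional 255-sector, 64-head layout is reported and the cylinder
 * count is derived from the capacity.
 */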
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct skd_device *skdev;
	u64 capacity;

	skdev = bdev->bd_disk->private_data;

	pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
		 skdev->name, __func__, __LINE__,
		 bdev->bd_disk->disk_name, current->comm);

	if (skdev->read_cap_is_valid) {
		capacity = get_capacity(skdev->disk);
		geo->heads = 64;
		geo->sectors = 255;
		geo->cylinders = (capacity) / (255 * 64);
		return 0;
	}
	return -EIO;
}
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
	pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
	device_add_disk(parent, skdev->disk);
	return 0;
}
static const struct block_device_operations skd_blockdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= skd_bdev_ioctl,
	.getgeo		= skd_bdev_getgeo,
};
/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */
static const struct pci_device_id skd_pci_tbl[] = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }			/* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
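
/*
 * Build a human-readable link description, e.g. "PCIe (5.0GT/s 4X)".
 * Offset 0x12 into the PCIe capability is the Link Status register
 * (PCI_EXP_LNKSTA): bits 3:0 encode the link speed, bits 9:4 the
 * negotiated link width.
 */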
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (0xF);
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	}
	return str;
}
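
/*
 * Probe sequence: enable the PCI device, claim its regions, set a
 * 64-bit DMA mask (falling back to 32-bit), register the block major
 * once, construct the skd_device, map the BARs, hook up the interrupt
 * and timer, then start the device and wait up to
 * SKD_START_WAIT_SECONDS for firmware to bring the disk online before
 * attaching the gendisk.
 */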
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
		DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
	pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
		pci_name(pdev), pdev->vendor, pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       skd_name(skdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(&pdev->dev, skdev);
		rc = 0;
	} else {
		/* we timed out, something is wrong with the device,
		   don't add the disk structure */
		pr_err("(%s): error: waiting for s1120 timed out %d!\n",
		       skd_name(skdev), rc);
		/* in case of no error; we timeout with ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

#ifdef SKD_VMK_POLL_HANDLER
	if (skdev->irq_type == SKD_IRQ_MSIX) {
		/* MSIX completion handler is being used for coredump */
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->msix_entries[5].vector,
						  skd_comp_q, skdev);
	} else {
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->pdev->irq, skd_isr,
						  skdev);
	}
#endif /* SKD_VMK_POLL_HANDLER */

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
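
/*
 * Power management mirrors probe/remove: suspend quiesces the device,
 * drops the IRQ, BAR mappings and regions, and saves PCI state; resume
 * restores state and rebuilds everything probe set up.
 */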
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};
/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}
static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}
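
/*
 * Debug-only state dumps. These go through pr_debug(), so they are
 * compiled out or silent unless DEBUG or dynamic debug is enabled.
 */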
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
	pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
		 skdev->name, __func__, __LINE__,
		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		 skd_skdev_state_to_str(skdev->state), skdev->state);
	pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->in_flight, skdev->cur_max_queue_depth,
		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
		 skdev->name, __func__, __LINE__,
		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		 skmsg->id, skmsg->length);
}
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		 skdev->name, __func__, __LINE__,
		 skd_skreq_state_to_str(skreq->state), skreq->state,
		 skreq->id, skreq->fitmsg_id);
	pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_debug("%s:%s:%d "
			 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count,
			 (int)rq_data_dir(req));
	} else
		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
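
/*
 * Module-parameter validation: out-of-range values are not fatal; each
 * one is logged and clamped back to its built-in default before the
 * PCI driver is registered.
 */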
static int __init skd_init(void)
{
	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);