// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SuperTrak EX Series Storage Controller driver for Linux
 *
 * Copyright (C) 2005-2015 Promise Technology Inc.
 *
 *	Ed Lin <promise_linux@promise.com>
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#define DRV_NAME "stex"
#define ST_DRIVER_VERSION	"6.02.0000.01"
#define ST_VER_MAJOR		6
#define ST_VER_MINOR		02
#define ST_OEM			0000
#define ST_BUILD_VER		01
enum {
	/* MU register offset */
	IMR0 = 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
	IMR1 = 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
	OMR0 = 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
	OMR1 = 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
	IDBL = 0x20,	/* MU_INBOUND_DOORBELL */
	IIS = 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
	IIM = 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
	ODBL = 0x2c,	/* MU_OUTBOUND_DOORBELL */
	OIS = 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
	OIM = 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */
	MAILBOX_BASE = 0x1000,
	MAILBOX_HNDSHK_STS = 0x0,

	/* MU register value */
	MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0),
	MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1),
	MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2),
	MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3),
	MU_INBOUND_DOORBELL_RESET = (1 << 4),

	MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0),
	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1),
	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2),
	MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3),
	MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4),
	MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27),

	/* MU status code */
	MU_STATE_STARTING = 1,
	MU_STATE_STARTED = 2,
	MU_STATE_RESETTING = 3,
	MU_STATE_FAILED = 4,
	MU_STATE_STOP = 5,
	MU_STATE_NOCONNECT = 6,

	MU_MAX_DELAY = 120,
	MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
	MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
	MU_HARD_RESET_WAIT = 30000,
	HMU_PARTNER_TYPE = 2,

	/* firmware returned values */
	SRB_STATUS_SUCCESS = 0x01,
	SRB_STATUS_ERROR = 0x04,
	SRB_STATUS_BUSY = 0x05,
	SRB_STATUS_INVALID_REQUEST = 0x06,
	SRB_STATUS_SELECTION_TIMEOUT = 0x0A,
	SRB_SEE_SENSE = 0x80,

	/* task attribute */
	TASK_ATTRIBUTE_SIMPLE = 0x0,
	TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
	TASK_ATTRIBUTE_ORDERED = 0x2,
	TASK_ATTRIBUTE_ACA = 0x4,

	SS_STS_NORMAL = 0x80000000,
	SS_STS_DONE = 0x40000000,
	SS_STS_HANDSHAKE = 0x20000000,

	SS_HEAD_HANDSHAKE = 0x80,

	SS_H2I_INT_RESET = 0x100,

	SS_I2H_REQUEST_RESET = 0x2000,

	SS_MU_OPERATIONAL = 0x80000000,

	STEX_CDB_LENGTH = 16,
	STATUS_VAR_LEN = 128,

	/* sg flags */
	SG_CF_EOT = 0x80,	/* end of table */
	SG_CF_64B = 0x40,	/* 64 bit item */
	SG_CF_HOST = 0x20,	/* sg in host memory */

	MSG_DATA_DIR_ND = 0,
	MSG_DATA_DIR_IN = 1,
	MSG_DATA_DIR_OUT = 2,

	st_shasta = 0,
	st_vsc = 1,
	st_yosemite = 2,
	st_seq = 3,
	st_yel = 4,
	st_P3 = 5,

	PASSTHRU_REQ_TYPE = 0x00000001,
	PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
	ST_INTERNAL_TIMEOUT = 180,

	/* vendor specific commands of Promise */
	MGT_CMD = 0xd8,
	SINBAND_MGT_CMD = 0xd9,
	ARRAY_CMD = 0xe0,
	CONTROLLER_CMD = 0xe1,
	DEBUGGING_CMD = 0xe2,
	PASSTHRU_CMD = 0xe3,

	PASSTHRU_GET_ADAPTER = 0x05,
	PASSTHRU_GET_DRVVER = 0x10,

	CTLR_CONFIG_CMD = 0x03,
	CTLR_SHUTDOWN = 0x0d,

	CTLR_POWER_STATE_CHANGE = 0x0e,
	CTLR_POWER_SAVING = 0x01,

	PASSTHRU_SIGNATURE = 0x4e415041,
	MGT_CMD_SIGNATURE = 0xba,

	INQUIRY_EVPD = 0x01,

	ST_ADDITIONAL_MEM = 0x200000,
	ST_ADDITIONAL_MEM_MIN = 0x80000,
	PMIC_SHUTDOWN = 0x0D,
};
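/*
 * A sketch of the classic (pre st_yel/st_P3) message-unit flow, as
 * implemented by stex_send_cmd() and stex_mu_intr() below: the host
 * copies a request into its ring, publishes the new ring head through
 * IMR0 and rings IDBL with MU_INBOUND_DOORBELL_REQHEADCHANGED; the
 * firmware publishes its status-ring head through OMR1 and rings ODBL
 * with MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED, and the driver
 * acknowledges consumed statuses by writing the head it has reached
 * back to IMR1.
 */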
	u8 ctrl;	/* SG_CF_xxx */

struct st_ss_sgitem {
	__le32 addr;
	__le32 addr_hi;
	__le32 count;
};

struct st_sgtable {
	__le16 sg_count;
	__le16 max_sg_count;
	__le32 sz_in_byte;
};

struct st_msg_header {
	__le64 handle;
	u8 flag;
	u8 channel;
	__le16 timeout;
	u32 reserved;
};
struct handshake_frame {
	__le64 rb_phy;		/* request payload queue physical address */
	__le16 req_sz;		/* size of each request payload */
	__le16 req_cnt;		/* count of reqs the buffer can hold */
	__le16 status_sz;	/* size of each status payload */
	__le16 status_cnt;	/* count of status the buffer can hold */
	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
	u8 partner_type;	/* who sends this frame */
	u8 reserved0[7];
	__le32 partner_ver_major;
	__le32 partner_ver_minor;
	__le32 partner_ver_oem;
	__le32 partner_ver_build;
	__le32 extra_offset;	/* NEW */
	__le32 extra_size;	/* NEW */
	__le32 scratch_size;
	u32 reserved1;
};
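/*
 * Rough layout of the coherent DMA area the handshake frame describes
 * (offsets are computed in stex_probe(); the scratch array exists only
 * on st_yel/st_P3):
 *
 *	dma_handle:     (rq_count+1) request slots, rq_size bytes each
 *	scratch_offset: (sts_count+1) 32-bit scratch/completion words
 *	sts_offset:     (sts_count+1) struct status_msg slots
 *	cp_offset:      one struct st_frame used as copy buffer
 *	extra_offset:   optional firmware memory (st_seq, some st_vsc)
 */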
struct req_msg {
	__le16 tag;
	u8 lun;
	u8 target;
	u8 task_attr;
	u8 task_manage;
	u8 data_dir;
	u8 payload_sz;		/* payload size in 4-byte units, not used */
	u8 cdb[STEX_CDB_LENGTH];
	u32 variable[];
};

struct status_msg {
	__le16 tag;
	u8 lun;
	u8 target;
	u8 srb_status;
	u8 scsi_status;
	u8 reserved;
	u8 payload_sz;		/* payload size in 4-byte units */
	u8 variable[STATUS_VAR_LEN];
};
	struct ver_info drv_ver;
	struct ver_info bios_ver;
	struct scsi_cmnd *cmd;

	unsigned int sense_bufflen;
	void __iomem *mmio_base;	/* iomapped PCI memory space */

	dma_addr_t dma_handle;

	struct Scsi_Host *host;
	struct pci_dev *pdev;

	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	struct status_msg *status_buffer;
	void *copy_buffer;	/* temp buffer for driver-handled commands */

	struct st_ccb *wait_ccb;

	char work_q_name[20];
	struct workqueue_struct *work_q;
	struct work_struct reset_work;
	wait_queue_head_t reset_waitq;
	unsigned int mu_status;
	unsigned int cardtype;
struct st_card_info {
	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);
	unsigned int max_id;
	unsigned int max_lun;
	unsigned int max_channel;
	u16 rq_count;
	u16 rq_size;
	u16 sts_count;
};
static int S6flag;

static int stex_halt(struct notifier_block *nb, ulong event, void *buf);
static struct notifier_block stex_notifier = {
	.notifier_call = stex_halt,
};
static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts (0=off, 1=on)");
static const char console_inq_page[] =
{
	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};
MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);
static struct status_msg *stex_get_status(struct st_hba *hba)
{
	struct status_msg *status = hba->status_buffer + hba->status_tail;

	++hba->status_tail;
	hba->status_tail %= hba->sts_count+1;

	return status;
}
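/*
 * Both rings hold count+1 slots, so the head/tail indices above and in
 * stex_alloc_req() below wrap with "% (count + 1)".  For example, with
 * a hypothetical sts_count of 3 the tail visits slots 0, 1, 2, 3 and
 * then wraps back to 0.
 */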
static void stex_invalid_field(struct scsi_cmnd *cmd,
			void (*done)(struct scsi_cmnd *))
{
	/* "Invalid field in cdb" */
	scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
	done(cmd);
}
static struct req_msg *stex_alloc_req(struct st_hba *hba)
{
	struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	return req;
}
static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
{
	return (struct req_msg *)(hba->dma_mem +
		hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
}
static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_sgitem *table;
	int i, nseg;

	cmd = ccb->cmd;
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		dst = (struct st_sgtable *)req->variable;

		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);
		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

		table = (struct st_sgitem *)(dst + 1);
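		/*
		 * Fill one descriptor per mapped segment; after the loop
		 * the index i equals nseg, so table[--i] tags the final
		 * descriptor with SG_CF_EOT to mark the end of the table
		 * for the firmware.
		 */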
		scsi_for_each_sg(cmd, sg, nseg, i) {
			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			table[i].addr = cpu_to_le64(sg_dma_address(sg));
			table[i].ctrl = SG_CF_64B | SG_CF_HOST;
		}
		table[--i].ctrl |= SG_CF_EOT;
	}

	return nseg;
}
static int stex_ss_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_ss_sgitem *table;
	int i, nseg;

	cmd = ccb->cmd;
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		dst = (struct st_sgtable *)req->variable;

		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);
		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

		table = (struct st_ss_sgitem *)(dst + 1);
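		/*
		 * The ss descriptors carry a 64-bit DMA address as two
		 * 32-bit halves.  The high half is taken with
		 * "(x >> 16) >> 16" rather than "x >> 32" so the
		 * expression stays well defined when dma_addr_t is only
		 * 32 bits wide (shifting a 32-bit value by 32 is
		 * undefined in C).
		 */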
		scsi_for_each_sg(cmd, sg, nseg, i) {
			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			table[i].addr =
				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
			table[i].addr_hi =
				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		}
	}

	return nseg;
}
static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
{
	struct st_frame *p;
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
	memset(p->base, 0, sizeof(u32)*6);
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
	p->rom_addr = 0;

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_level = 0;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
	p->subid =
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
}
static void
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	++hba->out_req_cnt;

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
}
static void
stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	struct scsi_cmnd *cmd;
	struct st_msg_header *msg_h;
	dma_addr_t addr;

	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	++hba->out_req_cnt;

	cmd = hba->ccb[tag].cmd;
	msg_h = (struct st_msg_header *)req - 1;
	if (likely(cmd)) {
		msg_h->channel = (u8)cmd->device->channel;
		msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
	}
	addr = hba->dma_handle + hba->req_head * hba->rq_size;
	addr += (hba->ccb[tag].sg_count+4)/11;
	msg_h->handle = cpu_to_le64(addr);

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;
	if (hba->cardtype == st_P3) {
		writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
		writel(addr, hba->mmio_base + YH2I_REQ);
	} else {
		writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
		readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
		writel(addr, hba->mmio_base + YH2I_REQ);
		readl(hba->mmio_base + YH2I_REQ); /* flush */
	}
}
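/*
 * The readl() calls marked "flush" above read back the register just
 * written so the posted MMIO writes reach the controller in order
 * before the next write is issued; the st_P3 branch posts the two
 * writes without read-backs.
 */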
static void return_abnormal_state(struct st_hba *hba, int status)
{
	struct st_ccb *ccb;
	unsigned long flags;
	u16 tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	for (tag = 0; tag < hba->host->can_queue; tag++) {
		ccb = &hba->ccb[tag];
		if (ccb->req == NULL)
			continue;
		ccb->req = NULL;
		if (ccb->cmd) {
			scsi_dma_unmap(ccb->cmd);
			ccb->cmd->result = status << 16;
			ccb->cmd->scsi_done(ccb->cmd);
			ccb->cmd = NULL;
		}
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static int
stex_slave_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
}
static int
stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	unsigned int id, lun;
	struct req_msg *req;
	u16 tag;

	host = cmd->device->host;
	id = cmd->device->id;
	lun = cmd->device->lun;
	hba = (struct st_hba *) &host->hostdata[0];
	if (hba->mu_status == MU_STATE_NOCONNECT) {
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	if (unlikely(hba->mu_status != MU_STATE_STARTED))
		return SCSI_MLQUEUE_HOST_BUSY;

	switch (cmd->cmnd[0]) {
	case MODE_SENSE_10:
	{
		static char ms10_caching_page[12] =
			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
		unsigned char page;

		page = cmd->cmnd[2] & 0x3f;
		if (page == 0x8 || page == 0x3f) {
			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
						 sizeof(ms10_caching_page));
			cmd->result = DID_OK << 16;
			done(cmd);
		} else
			stex_invalid_field(cmd, done);
		return 0;
	}
	case REPORT_LUNS:
		/*
		 * The shasta firmware does not report actual luns in the
		 * target, so fail the command to force sequential lun scan.
		 * Also, the console device does not support this command.
		 */
		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
			stex_invalid_field(cmd, done);
			return 0;
		}
		break;
	case TEST_UNIT_READY:
		if (id == host->max_id - 1) {
			cmd->result = DID_OK << 16;
			done(cmd);
			return 0;
		}
		break;
	case INQUIRY:
		if (lun >= host->max_lun) {
			cmd->result = DID_NO_CONNECT << 16;
			done(cmd);
			return 0;
		}
		if (id != host->max_id - 1)
			break;
		if (!lun && !cmd->device->channel &&
			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
						 sizeof(console_inq_page));
			cmd->result = DID_OK << 16;
			done(cmd);
		} else
			stex_invalid_field(cmd, done);
		return 0;
	case PASSTHRU_CMD:
		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
			struct st_drvver ver;
			size_t cp_len = sizeof(ver);

			ver.major = ST_VER_MAJOR;
			ver.minor = ST_VER_MINOR;
			ver.oem = ST_OEM;
			ver.build = ST_BUILD_VER;
			ver.signature[0] = PASSTHRU_SIGNATURE;
			ver.console_id = host->max_id - 1;
			ver.host_no = hba->host->host_no;
			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
			if (sizeof(ver) == cp_len)
				cmd->result = DID_OK << 16;
			else
				cmd->result = DID_ERROR << 16;
			done(cmd);
			return 0;
		}
		break;
	default:
		break;
	}

	cmd->scsi_done = done;

	tag = scsi_cmd_to_rq(cmd)->tag;

	if (unlikely(tag >= host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	req = hba->alloc_rq(hba);

	req->lun = lun;
	req->target = id;

	/* cdb */
	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		req->data_dir = MSG_DATA_DIR_IN;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		req->data_dir = MSG_DATA_DIR_OUT;
	else
		req->data_dir = MSG_DATA_DIR_ND;

	hba->ccb[tag].cmd = cmd;
	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	hba->ccb[tag].sense_buffer = cmd->sense_buffer;

	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
		hba->ccb[tag].sg_count = 0;
		memset(&req->variable[0], 0, 8);
	}

	hba->send(hba, req, tag);
	return 0;
}
static DEF_SCSI_QCMD(stex_queuecommand)
static void stex_scsi_done(struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd = ccb->cmd;
	int result;

	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
		result = ccb->scsi_status;
		switch (ccb->scsi_status) {
		case SAM_STAT_GOOD:
			result |= DID_OK << 16;
			break;
		case SAM_STAT_CHECK_CONDITION:
			result |= DID_OK << 16;
			break;
		case SAM_STAT_BUSY:
			result |= DID_BUS_BUSY << 16;
			break;
		default:
			result |= DID_ERROR << 16;
			break;
		}
	}
	else if (ccb->srb_status & SRB_SEE_SENSE)
		result = SAM_STAT_CHECK_CONDITION;
	else switch (ccb->srb_status) {
		case SRB_STATUS_SELECTION_TIMEOUT:
			result = DID_NO_CONNECT << 16;
			break;
		case SRB_STATUS_BUSY:
			result = DID_BUS_BUSY << 16;
			break;
		case SRB_STATUS_INVALID_REQUEST:
		case SRB_STATUS_ERROR:
		default:
			result = DID_ERROR << 16;
			break;
	}

	cmd->result = result;
	cmd->scsi_done(cmd);
}
static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
{
	if (resp->scsi_status != SAM_STAT_GOOD) {
		if (ccb->sense_buffer != NULL)
			memcpy(ccb->sense_buffer, resp->variable,
				min(variable, ccb->sense_bufflen));
		return;
	}

	if (ccb->cmd == NULL)
		return;
	scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
}
static void stex_check_cmd(struct st_hba *hba,
	struct st_ccb *ccb, struct status_msg *resp)
{
	if (ccb->cmd->cmnd[0] == MGT_CMD &&
		resp->scsi_status != SAM_STAT_CHECK_CONDITION)
		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
			le32_to_cpu(*(__le32 *)&resp->variable[0]));
}
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
{
	void __iomem *base = hba->mmio_base;
	struct status_msg *resp;
	struct st_ccb *ccb;
	unsigned int size;
	u16 tag;

	if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
		return;

	/* status payloads */
	hba->status_head = readl(base + OMR1);
	if (unlikely(hba->status_head > hba->sts_count)) {
		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
			pci_name(hba->pdev));
		return;
	}

	/*
	 * it's not a valid status payload if:
	 * 1. there are no pending requests (e.g. during init stage)
	 * 2. there are some pending requests, but the controller is in
	 *    reset status, and its type is not st_yosemite
	 * firmware of st_yosemite in reset status will return pending
	 * requests to the driver, so we allow it to pass
	 */
	if (unlikely(hba->out_req_cnt <= 0 ||
			(hba->mu_status == MU_STATE_RESETTING &&
			 hba->cardtype != st_yosemite))) {
		hba->status_tail = hba->status_head;
		goto update_status;
	}

	while (hba->status_tail != hba->status_head) {
		resp = stex_get_status(hba);
		tag = le16_to_cpu(resp->tag);
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		hba->out_req_cnt--;
		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		size = resp->payload_sz * sizeof(u32); /* payload size */
		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
			size > sizeof(*resp))) {
			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
				pci_name(hba->pdev));
		} else {
			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
			if (size)
				stex_copy_data(ccb, resp, size);
		}

		ccb->req = NULL;
		ccb->srb_status = resp->srb_status;
		ccb->scsi_status = resp->scsi_status;

		if (likely(ccb->cmd != NULL)) {
			if (hba->cardtype == st_yosemite)
				stex_check_cmd(hba, ccb, resp);

			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
				stex_controller_info(hba, ccb);

			scsi_dma_unmap(ccb->cmd);
			stex_scsi_done(ccb);
		} else
			ccb->req_type = 0;
	}

update_status:
	writel(hba->status_head, base + IMR1);
	readl(base + IMR1); /* flush */
}
static irqreturn_t stex_intr(int irq, void *__hba)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;
	u32 data;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + ODBL);

	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
			hba->cardtype == st_shasta))
			queue_work(hba->work_q, &hba->reset_work);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_NONE;
}
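/*
 * st_yel and st_P3 do not use the legacy doorbell/status-head registers
 * handled above.  Instead each completion is posted as a 32-bit word in
 * the scratch array: SS_STS_NORMAL marks the word valid, SS_STS_DONE
 * flags a fully successful command with no status payload to copy, and
 * the low 16 bits carry the command tag, as stex_ss_mu_intr() below
 * reads it.
 */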
static void stex_ss_mu_intr(struct st_hba *hba)
{
	struct status_msg *resp;
	struct st_ccb *ccb;
	__le32 *scratch;
	unsigned int size;
	int count = 0;
	u32 value;
	u16 tag;

	if (unlikely(hba->out_req_cnt <= 0 ||
			hba->mu_status == MU_STATE_RESETTING))
		return;

	while (count < hba->sts_count) {
		scratch = hba->scratch + hba->status_tail;
		value = le32_to_cpu(*scratch);
		if (unlikely(!(value & SS_STS_NORMAL)))
			return;

		resp = hba->status_buffer + hba->status_tail;
		*scratch = 0;
		++count;
		++hba->status_tail;
		hba->status_tail %= hba->sts_count+1;

		tag = (u16)value;
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		hba->out_req_cnt--;
		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		ccb->req = NULL;
		if (likely(value & SS_STS_DONE)) { /* normal case */
			ccb->srb_status = SRB_STATUS_SUCCESS;
			ccb->scsi_status = SAM_STAT_GOOD;
		} else {
			ccb->srb_status = resp->srb_status;
			ccb->scsi_status = resp->scsi_status;
			size = resp->payload_sz * sizeof(u32);
			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
				size > sizeof(*resp))) {
				printk(KERN_WARNING DRV_NAME
					"(%s): bad status size\n",
					pci_name(hba->pdev));
			} else {
				size -= sizeof(*resp) - STATUS_VAR_LEN;
				if (size)
					stex_copy_data(ccb, resp, size);
			}
			if (likely(ccb->cmd != NULL))
				stex_check_cmd(hba, ccb, resp);
		}

		if (likely(ccb->cmd != NULL)) {
			scsi_dma_unmap(ccb->cmd);
			stex_scsi_done(ccb);
		} else
			ccb->req_type = 0;
	}
}
static irqreturn_t stex_ss_intr(int irq, void *__hba)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;
	u32 data;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);

	if (hba->cardtype == st_yel) {
		data = readl(base + YI2H_INT);
		if (data && data != 0xffffffff) {
			/* clear the interrupt */
			writel(data, base + YI2H_INT_C);
			stex_ss_mu_intr(hba);
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			if (unlikely(data & SS_I2H_REQUEST_RESET))
				queue_work(hba->work_q, &hba->reset_work);
			return IRQ_HANDLED;
		}
	} else {
		data = readl(base + PSCRATCH4);
		if (data != 0xffffffff) {
			if (data != 0) {
				/* clear the interrupt */
				writel(data, base + PSCRATCH1);
				writel((1 << 22), base + YH2I_INT);
			}
			stex_ss_mu_intr(hba);
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			if (unlikely(data & SS_I2H_REQUEST_RESET))
				queue_work(hba->work_q, &hba->reset_work);
			return IRQ_HANDLED;
		}
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_NONE;
}
static int stex_common_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct handshake_frame *h;
	dma_addr_t status_phys;
	u32 data;
	unsigned long before;

	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
		readl(base + IDBL);
		before = jiffies;
		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no handshake signature\n",
					pci_name(hba->pdev));
				return -1;
			}
			rmb();
			msleep(1);
		}
	}

	udelay(10);

	data = readl(base + OMR1);
	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
		data &= 0x0000ffff;
		if (hba->host->can_queue > data) {
			hba->host->can_queue = data;
			hba->host->cmd_per_lun = data;
		}
	}

	h = (struct handshake_frame *)hba->status_buffer;
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
	h->partner_type = HMU_PARTNER_TYPE;
	if (hba->extra_offset) {
		h->extra_offset = cpu_to_le32(hba->extra_offset);
		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
	} else
		h->extra_offset = h->extra_size = 0;

	status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
	writel(status_phys, base + IMR0);
	readl(base + IMR0);
	writel((status_phys >> 16) >> 16, base + IMR1);
	readl(base + IMR1);

	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
	readl(base + OMR0);
	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
	readl(base + IDBL); /* flush */

	udelay(10);
	before = jiffies;
	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));
			return -1;
		}
		rmb();
		msleep(1);
	}

	writel(0, base + IMR0);
	readl(base + IMR0);
	writel(0, base + OMR0);
	readl(base + OMR0);
	writel(0, base + IMR1);
	readl(base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */

	return 0;
}
static int stex_ss_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct st_msg_header *msg_h;
	struct handshake_frame *h;
	__le32 *scratch;
	u32 data, scratch_size, mailboxdata, operationaldata;
	unsigned long before;
	int ret = 0;

	before = jiffies;

	if (hba->cardtype == st_yel) {
		operationaldata = readl(base + YIOA_STATUS);
		while (operationaldata != SS_MU_OPERATIONAL) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): firmware not operational\n",
					pci_name(hba->pdev));
				return -1;
			}
			msleep(1);
			operationaldata = readl(base + YIOA_STATUS);
		}
	} else {
		operationaldata = readl(base + PSCRATCH3);
		while (operationaldata != SS_MU_OPERATIONAL) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): firmware not operational\n",
					pci_name(hba->pdev));
				return -1;
			}
			msleep(1);
			operationaldata = readl(base + PSCRATCH3);
		}
	}

	msg_h = (struct st_msg_header *)hba->dma_mem;
	msg_h->handle = cpu_to_le64(hba->dma_handle);
	msg_h->flag = SS_HEAD_HANDSHAKE;

	h = (struct handshake_frame *)(msg_h + 1);
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
	h->partner_type = HMU_PARTNER_TYPE;
	h->extra_offset = h->extra_size = 0;
	scratch_size = (hba->sts_count+1)*sizeof(u32);
	h->scratch_size = cpu_to_le32(scratch_size);

	if (hba->cardtype == st_yel) {
		data = readl(base + YINT_EN);
		data &= ~4;
		writel(data, base + YINT_EN);
		writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
		readl(base + YH2I_REQ_HI);
		writel(hba->dma_handle, base + YH2I_REQ);
		readl(base + YH2I_REQ); /* flush */
	} else {
		data = readl(base + YINT_EN);
		data &= ~(1 << 0);
		data &= ~(1 << 2);
		writel(data, base + YINT_EN);
		if (hba->msi_lock == 0) {
			/* P3 MSI Register cannot access twice */
			writel((1 << 6), base + YH2I_INT);
			hba->msi_lock = 1;
		}
		writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
		writel(hba->dma_handle, base + YH2I_REQ);
	}

	before = jiffies;
	scratch = hba->scratch;
	if (hba->cardtype == st_yel) {
		while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no signature after handshake frame\n",
					pci_name(hba->pdev));
				ret = -1;
				break;
			}
			rmb();
			msleep(1);
		}
	} else {
		mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS);
		while (mailboxdata != SS_STS_HANDSHAKE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no signature after handshake frame\n",
					pci_name(hba->pdev));
				ret = -1;
				break;
			}
			rmb();
			msleep(1);
			mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS);
		}
	}
	memset(scratch, 0, scratch_size);
	msg_h->flag = 0;

	return ret;
}
static int stex_handshake(struct st_hba *hba)
{
	int err;
	unsigned long flags;
	unsigned int mu_status;

	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
		err = stex_ss_handshake(hba);
	else
		err = stex_common_handshake(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	mu_status = hba->mu_status;
	if (err == 0) {
		hba->req_head = 0;
		hba->status_head = 0;
		hba->status_tail = 0;
		hba->out_req_cnt = 0;
		hba->mu_status = MU_STATE_STARTED;
	} else
		hba->mu_status = MU_STATE_FAILED;
	if (mu_status == MU_STATE_RESETTING)
		wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
static int stex_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct st_hba *hba = (struct st_hba *)host->hostdata;
	u16 tag = scsi_cmd_to_rq(cmd)->tag;
	void __iomem *base;
	u32 data;
	int result = SUCCESS;
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "aborting command\n");

	base = hba->mmio_base;
	spin_lock_irqsave(host->host_lock, flags);
	if (tag < host->can_queue &&
		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
		hba->wait_ccb = &hba->ccb[tag];
	else
		goto out;

	if (hba->cardtype == st_yel) {
		data = readl(base + YI2H_INT);
		if (data == 0 || data == 0xffffffff)
			goto fail_out;

		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
	} else if (hba->cardtype == st_P3) {
		data = readl(base + PSCRATCH4);
		if (data == 0xffffffff)
			goto fail_out;

		writel(data, base + PSCRATCH1);
		writel((1 << 22), base + YH2I_INT);
		stex_ss_mu_intr(hba);
	} else {
		data = readl(base + ODBL);
		if (data == 0 || data == 0xffffffff)
			goto fail_out;

		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);
	}
	if (hba->wait_ccb == NULL) {
		printk(KERN_WARNING DRV_NAME
			"(%s): lost interrupt\n", pci_name(hba->pdev));
		goto out;
	}

fail_out:
	scsi_dma_unmap(cmd);
	hba->wait_ccb->req = NULL; /* nullify the req's future return */
	hba->wait_ccb = NULL;
	result = FAILED;
out:
	spin_unlock_irqrestore(host->host_lock, flags);
	return result;
}
static void stex_hard_reset(struct st_hba *hba)
{
	struct pci_bus *bus;
	int i;
	u16 pci_cmd;
	u8 pci_bctl;

	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/*
	 * Reset secondary bus. Our controller (MU/ATU) is the only device
	 * on the secondary bus. Consult the Intel 80331/3 developer's
	 * manual for detail.
	 */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/*
	 * 1 ms may be enough for 8-port controllers. But 16-port controllers
	 * require more time to finish bus reset. Use 100 ms here for safety.
	 */
	msleep(100);
	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
			break;
		msleep(1);
	}

	ssleep(5);
	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);
}
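/*
 * The secondary-bus reset above wipes the controller's configuration
 * space, which is why the 16 dwords (the 64-byte PCI header) are saved
 * into saved_config_space beforehand and written back once the device
 * answers config reads with bus mastering enabled again.
 */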
static int stex_yos_reset(struct st_hba *hba)
{
	void __iomem *base;
	unsigned long flags, before;
	int ret = 0;

	base = hba->mmio_base;
	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
	readl(base + IDBL); /* flush */
	before = jiffies;
	while (hba->out_req_cnt > 0) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): reset timeout\n", pci_name(hba->pdev));
			ret = -1;
			break;
		}
		msleep(1);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret == -1)
		hba->mu_status = MU_STATE_FAILED;
	else
		hba->mu_status = MU_STATE_STARTED;
	wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
static void stex_ss_reset(struct st_hba *hba)
{
	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
	readl(hba->mmio_base + YH2I_INT);
	ssleep(5);
}
static void stex_p3_reset(struct st_hba *hba)
{
	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
	ssleep(5);
}
static int stex_do_reset(struct st_hba *hba)
{
	unsigned long flags;
	unsigned int mu_status = MU_STATE_RESETTING;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->mu_status == MU_STATE_STARTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
			pci_name(hba->pdev));
		return -1;
	}
	while (hba->mu_status == MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		wait_event_timeout(hba->reset_waitq,
				   hba->mu_status != MU_STATE_RESETTING,
				   MU_MAX_DELAY * HZ);
		spin_lock_irqsave(hba->host->host_lock, flags);
		mu_status = hba->mu_status;
	}

	if (mu_status != MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return (mu_status == MU_STATE_STARTED) ? 0 : -1;
	}

	hba->mu_status = MU_STATE_RESETTING;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (hba->cardtype == st_yosemite)
		return stex_yos_reset(hba);

	if (hba->cardtype == st_shasta)
		stex_hard_reset(hba);
	else if (hba->cardtype == st_yel)
		stex_ss_reset(hba);
	else if (hba->cardtype == st_P3)
		stex_p3_reset(hba);

	return_abnormal_state(hba, DID_RESET);

	if (stex_handshake(hba) == 0)
		return 0;

	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
		pci_name(hba->pdev));

	return -1;
}
static int stex_reset(struct scsi_cmnd *cmd)
{
	struct st_hba *hba;

	hba = (struct st_hba *) &cmd->device->host->hostdata[0];

	shost_printk(KERN_INFO, cmd->device->host,
		"resetting host\n");

	return stex_do_reset(hba) ? FAILED : SUCCESS;
}
static void stex_reset_work(struct work_struct *work)
{
	struct st_hba *hba = container_of(work, struct st_hba, reset_work);

	stex_do_reset(hba);
}
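/*
 * Worked example for the geometry heuristic below: a hypothetical disk
 * of 4,000,000 sectors is above the 0x200000-sector threshold, so it
 * gets 255 heads and 63 sectors per track; sector_div() then yields
 * 4,000,000 / (255 * 63) = 248 cylinders.
 */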
static int stex_biosparam(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads = 255, sectors = 63;

	if (capacity < 0x200000) {
		heads = 64;
		sectors = 32;
	}

	sector_div(capacity, heads * sectors);

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = capacity;

	return 0;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.proc_name = DRV_NAME,
	.bios_param = stex_biosparam,
	.queuecommand = stex_queuecommand,
	.slave_configure = stex_slave_config,
	.eh_abort_handler = stex_abort,
	.eh_host_reset_handler = stex_reset,
	.this_id = -1,
	.dma_boundary = PAGE_SIZE - 1,
};
static struct pci_device_id stex_pci_tbl[] = {
	/* st_shasta */
	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX12350 */
	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX4350 */
	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX24350 */

	/* st_vsc */
	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },

	/* st_yosemite */
	{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },

	/* st_seq */
	{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },

	/* st_yel */
	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },

	/* st_P3 */
	{ PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
		0x8870, 0, 0, st_P3 },
	{ PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
		0x4300, 0, 0, st_P3 },

	/* st_P3, SymplyStor4E */
	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
		0x4311, 0, 0, st_P3 },
	/* st_P3, SymplyStor8E */
	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
		0x4312, 0, 0, st_P3 },
	/* st_P3, SymplyStor4 */
	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
		0x4321, 0, 0, st_P3 },
	/* st_P3, SymplyStor8 */
	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
		0x4322, 0, 0, st_P3 },
	{ }	/* terminate list */
};
static struct st_card_info stex_card_info[] = {
	/* st_shasta */
	{
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,
	},

	/* st_vsc */
	{
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,
	},

	/* st_yosemite */
	{
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,
	},

	/* st_seq */
	{
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,
	},

	/* st_yel */
	{
		.alloc_rq	= stex_ss_alloc_req,
		.map_sg		= stex_ss_map_sg,
		.send		= stex_ss_send_cmd,
	},

	/* st_P3 */
	{
		.alloc_rq	= stex_ss_alloc_req,
		.map_sg		= stex_ss_map_sg,
		.send		= stex_ss_send_cmd,
	},
};
static int stex_request_irq(struct st_hba *hba)
{
	struct pci_dev *pdev = hba->pdev;
	int status;

	if (msi || hba->cardtype == st_P3) {
		status = pci_enable_msi(pdev);
		if (status != 0)
			printk(KERN_ERR DRV_NAME
				"(%s): error %d setting up MSI\n",
				pci_name(pdev), status);
		else
			hba->msi_enabled = 1;
	} else
		hba->msi_enabled = 0;

	status = request_irq(pdev->irq,
		(hba->cardtype == st_yel || hba->cardtype == st_P3) ?
		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);

	if (status != 0) {
		if (hba->msi_enabled)
			pci_disable_msi(pdev);
	}
	return status;
}
static void stex_free_irq(struct st_hba *hba)
{
	struct pci_dev *pdev = hba->pdev;

	free_irq(pdev->irq, hba);
	if (hba->msi_enabled)
		pci_disable_msi(pdev);
}
static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	const struct st_card_info *ci = NULL;
	u32 sts_offset, cp_offset, scratch_offset;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	S6flag = 0;
	register_reboot_notifier(&stex_notifier);

	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));

	if (!host) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_disable;
	}

	hba = (struct st_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct st_hba));

	err = pci_request_regions(pdev, DRV_NAME);
	if (err < 0) {
		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
			pci_name(pdev));
		goto out_scsi_host_put;
	}

	hba->mmio_base = pci_ioremap_bar(pdev, 0);
	if (!hba->mmio_base) {
		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_release_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
			pci_name(pdev));
		goto out_iounmap;
	}

	hba->cardtype = (unsigned int) id->driver_data;
	ci = &stex_card_info[hba->cardtype];
	switch (id->subdevice) {
	default:
		if (hba->cardtype == st_yel || hba->cardtype == st_P3)
			hba->supports_pm = 1;
	}

	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
		sts_offset += (ci->sts_count+1) * sizeof(u32);
	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
	hba->dma_size = cp_offset + sizeof(struct st_frame);
	if (hba->cardtype == st_seq ||
		(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
		hba->extra_offset = hba->dma_size;
		hba->dma_size += ST_ADDITIONAL_MEM;
	}
	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
	if (!hba->dma_mem) {
		/* Retry minimum coherent mapping for st_seq and st_vsc */
		if (hba->cardtype == st_seq ||
			(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
			printk(KERN_WARNING DRV_NAME
				"(%s): allocating min buffer for controller\n",
				pci_name(pdev));
			hba->dma_size = hba->extra_offset
				+ ST_ADDITIONAL_MEM_MIN;
			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
				hba->dma_size, &hba->dma_handle, GFP_KERNEL);
		}

		if (!hba->dma_mem) {
			err = -ENOMEM;
			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
				pci_name(pdev));
			goto out_iounmap;
		}
	}

	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
	if (!hba->ccb) {
		err = -ENOMEM;
		printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
			pci_name(pdev));
		goto out_pci_free;
	}

	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
	hba->copy_buffer = hba->dma_mem + cp_offset;
	hba->rq_count = ci->rq_count;
	hba->rq_size = ci->rq_size;
	hba->sts_count = ci->sts_count;
	hba->alloc_rq = ci->alloc_rq;
	hba->map_sg = ci->map_sg;
	hba->send = ci->send;
	hba->mu_status = MU_STATE_STARTING;
	hba->msi_lock = 0;

	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
		host->sg_tablesize = 38;
	else
		host->sg_tablesize = 32;
	host->can_queue = ci->rq_count;
	host->cmd_per_lun = ci->rq_count;
	host->max_id = ci->max_id;
	host->max_lun = ci->max_lun;
	host->max_channel = ci->max_channel;
	host->unique_id = host->host_no;
	host->max_cmd_len = STEX_CDB_LENGTH;

	hba->host = host;
	hba->pdev = pdev;
	init_waitqueue_head(&hba->reset_waitq);

	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
		 "stex_wq_%d", host->host_no);
	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
	if (!hba->work_q) {
		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_ccb_free;
	}
	INIT_WORK(&hba->reset_work, stex_reset_work);

	err = stex_request_irq(hba);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
			pci_name(pdev));
		goto out_free_wq;
	}

	err = stex_handshake(hba);
	if (err)
		goto out_free_irq;

	pci_set_drvdata(pdev, hba);

	err = scsi_add_host(host, &pdev->dev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
			pci_name(pdev));
		goto out_free_irq;
	}

	scsi_scan_host(host);

	return 0;

out_free_irq:
	stex_free_irq(hba);
out_free_wq:
	destroy_workqueue(hba->work_q);
out_ccb_free:
	kfree(hba->ccb);
out_pci_free:
	dma_free_coherent(&pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);
out_iounmap:
	iounmap(hba->mmio_base);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);

	return err;
}
static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
{
	struct req_msg *req;
	struct st_msg_header *msg_h;
	unsigned long flags;
	unsigned long before;
	u16 tag = 0;

	spin_lock_irqsave(hba->host->host_lock, flags);

	if ((hba->cardtype == st_yel || hba->cardtype == st_P3) &&
		hba->supports_pm == 1) {
		if (st_sleep_mic == ST_NOTHANDLED) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			return;
		}
	}
	req = hba->alloc_rq(hba);
	if (hba->cardtype == st_yel || hba->cardtype == st_P3) {
		msg_h = (struct st_msg_header *)req - 1;
		memset(msg_h, 0, hba->rq_size);
	} else
		memset(req, 0, hba->rq_size);

	if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel
		|| hba->cardtype == st_P3)
		&& st_sleep_mic == ST_IGNORED) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = CTLR_SHUTDOWN;
	} else if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
		&& st_sleep_mic != ST_IGNORED) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = PMIC_SHUTDOWN;
		req->cdb[4] = st_sleep_mic;
	} else {
		req->cdb[0] = CONTROLLER_CMD;
		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
		req->cdb[2] = CTLR_POWER_SAVING;
	}
	hba->ccb[tag].cmd = NULL;
	hba->ccb[tag].sg_count = 0;
	hba->ccb[tag].sense_bufflen = 0;
	hba->ccb[tag].sense_buffer = NULL;
	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
	hba->send(hba, req, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	before = jiffies;
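	/*
	 * The internal request above was queued on tag 0 with
	 * ccb[tag].req_type set to PASSTHRU_REQ_TYPE and no scsi_cmnd
	 * attached; the interrupt handlers clear req_type for such
	 * internal requests, so the loop below simply polls for that,
	 * bounded by ST_INTERNAL_TIMEOUT seconds.
	 */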
	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			hba->ccb[tag].req_type = 0;
			hba->mu_status = MU_STATE_STOP;
			return;
		}
		msleep(1);
	}
	hba->mu_status = MU_STATE_STOP;
}
static void stex_hba_free(struct st_hba *hba)
{
	stex_free_irq(hba);

	destroy_workqueue(hba->work_q);

	iounmap(hba->mmio_base);

	pci_release_regions(hba->pdev);

	kfree(hba->ccb);

	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);
}
static void stex_remove(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	hba->mu_status = MU_STATE_NOCONNECT;
	return_abnormal_state(hba, DID_NO_CONNECT);
	scsi_remove_host(hba->host);

	scsi_block_requests(hba->host);

	stex_hba_free(hba);

	scsi_host_put(hba->host);

	pci_disable_device(pdev);

	unregister_reboot_notifier(&stex_notifier);
}
static void stex_shutdown(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	if (hba->supports_pm == 0) {
		stex_hba_stop(hba, ST_IGNORED);
	} else if (hba->supports_pm == 1 && S6flag) {
		unregister_reboot_notifier(&stex_notifier);
		stex_hba_stop(hba, ST_S6);
	} else
		stex_hba_stop(hba, ST_S5);
}
static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state)
{
	switch (state.event) {
	case PM_EVENT_SUSPEND:
		return ST_S3;
	case PM_EVENT_HIBERNATE:
		hba->msi_lock = 0;
		return ST_S4;
	default:
		return ST_NOTHANDLED;
	}
}
static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
		&& hba->supports_pm == 1)
		stex_hba_stop(hba, stex_choice_sleep_mic(hba, state));
	else
		stex_hba_stop(hba, ST_IGNORED);
	return 0;
}
static int stex_resume(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	hba->mu_status = MU_STATE_STARTING;
	stex_handshake(hba);
	return 0;
}
static int stex_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
	S6flag = 1;
	return NOTIFY_OK;
}
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
static struct pci_driver stex_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= stex_pci_tbl,
	.probe		= stex_probe,
	.remove		= stex_remove,
	.shutdown	= stex_shutdown,
	.suspend	= stex_suspend,
	.resume		= stex_resume,
};
static int __init stex_init(void)
{
	printk(KERN_INFO DRV_NAME
		": Promise SuperTrak EX Driver version: %s\n",
		ST_DRIVER_VERSION);

	return pci_register_driver(&stex_pci_driver);
}
static void __exit stex_exit(void)
{
	pci_unregister_driver(&stex_pci_driver);
}
module_init(stex_init);
module_exit(stex_exit);