 * SuperTrak EX Series Storage Controller driver for Linux
 *
 * Copyright (C) 2005-2015 Promise Technology Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Ed Lin <promise_linux@promise.com>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>

#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#define DRV_NAME "stex"
#define ST_DRIVER_VERSION "5.00.0000.01"
#define ST_VER_MAJOR		5
#define ST_VER_MINOR		00
#define ST_BUILD_VER		01
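/*
 * Note: the leading zeros make ST_VER_MINOR and ST_BUILD_VER octal
 * constants; since 00 and 01 equal 0 and 1 in any base, the version
 * reported to the firmware is unaffected.
 */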
	/* MU register offsets */
	IMR0 = 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
	IMR1 = 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
	OMR0 = 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
	OMR1 = 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
	IDBL = 0x20,	/* MU_INBOUND_DOORBELL */
	IIS = 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
	IIM = 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
	ODBL = 0x2c,	/* MU_OUTBOUND_DOORBELL */
	OIS = 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
	OIM = 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */
	/* MU register values */
	MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0),
	MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1),
	MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2),
	MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3),
	MU_INBOUND_DOORBELL_RESET = (1 << 4),

	MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0),
	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1),
	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2),
	MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3),
	MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4),
	MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27),

	MU_STATE_STARTING = 1,
	MU_STATE_RESETTING = 3,
	MU_STATE_NOCONNECT = 6,

	MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
	MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
	MU_HARD_RESET_WAIT = 30000,
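	/*
	 * MU_HARD_RESET_WAIT is the iteration bound stex_hard_reset() uses
	 * while polling PCI_COMMAND for the controller to come back, i.e.
	 * roughly a 30 second ceiling assuming the (elided) 1 ms sleep per
	 * iteration.
	 */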
	/* firmware-returned values */
	SRB_STATUS_SUCCESS = 0x01,
	SRB_STATUS_ERROR = 0x04,
	SRB_STATUS_BUSY = 0x05,
	SRB_STATUS_INVALID_REQUEST = 0x06,
	SRB_STATUS_SELECTION_TIMEOUT = 0x0A,
	SRB_SEE_SENSE = 0x80,

	TASK_ATTRIBUTE_SIMPLE = 0x0,
	TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
	TASK_ATTRIBUTE_ORDERED = 0x2,
	TASK_ATTRIBUTE_ACA = 0x4,

	SS_STS_NORMAL = 0x80000000,
	SS_STS_DONE = 0x40000000,
	SS_STS_HANDSHAKE = 0x20000000,

	SS_HEAD_HANDSHAKE = 0x80,

	SS_H2I_INT_RESET = 0x100,

	SS_I2H_REQUEST_RESET = 0x2000,

	SS_MU_OPERATIONAL = 0x80000000,

	STEX_CDB_LENGTH = 16,
	STATUS_VAR_LEN = 128,
	SG_CF_EOT = 0x80,	/* end of table */
	SG_CF_64B = 0x40,	/* 64 bit item */
	SG_CF_HOST = 0x20,	/* sg in host memory */

	MSG_DATA_DIR_OUT = 2,

	PASSTHRU_REQ_TYPE = 0x00000001,
	PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
	ST_INTERNAL_TIMEOUT = 180,
	/* Promise vendor-specific commands */
	SINBAND_MGT_CMD = 0xd9,
	CONTROLLER_CMD = 0xe1,
	DEBUGGING_CMD = 0xe2,

	PASSTHRU_GET_ADAPTER = 0x05,
	PASSTHRU_GET_DRVVER = 0x10,

	CTLR_CONFIG_CMD = 0x03,
	CTLR_SHUTDOWN = 0x0d,
	CTLR_POWER_STATE_CHANGE = 0x0e,
	CTLR_POWER_SAVING = 0x01,

	PASSTHRU_SIGNATURE = 0x4e415041,
	MGT_CMD_SIGNATURE = 0xba,

	ST_ADDITIONAL_MEM = 0x200000,
	ST_ADDITIONAL_MEM_MIN = 0x80000,
	PMIC_SHUTDOWN = 0x0D,
	u8 ctrl;	/* SG_CF_xxx */

struct st_ss_sgitem {

struct st_msg_header {

struct handshake_frame {
	__le64 rb_phy;		/* request payload queue physical address */
	__le16 req_sz;		/* size of each request payload */
	__le16 req_cnt;		/* count of reqs the buffer can hold */
	__le16 status_sz;	/* size of each status payload */
	__le16 status_cnt;	/* count of status the buffer can hold */
	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
	u8 partner_type;	/* who sends this frame */
	__le32 partner_ver_major;
	__le32 partner_ver_minor;
	__le32 partner_ver_oem;
	__le32 partner_ver_build;
	__le32 extra_offset;	/* NEW */
	__le32 extra_size;	/* NEW */

	u8 payload_sz;		/* payload size in 4-byte units, not used */
	u8 cdb[STEX_CDB_LENGTH];

	u8 payload_sz;		/* payload size in 4-byte units */
	u8 variable[STATUS_VAR_LEN];
	struct ver_info drv_ver;
	struct ver_info bios_ver;

	struct scsi_cmnd *cmd;

	unsigned int sense_bufflen;

	void __iomem *mmio_base;	/* iomapped PCI memory space */

	dma_addr_t dma_handle;

	struct Scsi_Host *host;
	struct pci_dev *pdev;

	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	struct status_msg *status_buffer;
	void *copy_buffer;	/* temp buffer for driver-handled commands */

	struct st_ccb *wait_ccb;

	char work_q_name[20];
	struct workqueue_struct *work_q;
	struct work_struct reset_work;
	wait_queue_head_t reset_waitq;
	unsigned int mu_status;
	unsigned int cardtype;
struct st_card_info {
	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	unsigned int max_lun;
	unsigned int max_channel;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts (0=off, 1=on)");
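/* Usage example: load the module with MSI enabled via "modprobe stex msi=1". */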
static const char console_inq_page[] =
{
	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};
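/*
 * The table above is canned INQUIRY response data that
 * stex_queuecommand_lck() returns for the virtual RAID console device
 * (vendor "Promise ", product "RAID Console", revision "1.00").
 */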
MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);
static struct status_msg *stex_get_status(struct st_hba *hba)
	struct status_msg *status = hba->status_buffer + hba->status_tail;

	++hba->status_tail;
	hba->status_tail %= hba->sts_count+1;
static void stex_invalid_field(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* "Invalid field in cdb" */
	scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
static struct req_msg *stex_alloc_req(struct st_hba *hba)
	struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;
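/*
 * Note: both the request and status rings are sized one slot larger than
 * the queue depth exposed to the midlayer (rq_count+1 slots for
 * can_queue == rq_count), the usual one-empty-slot ring convention that
 * lets the producer index wrap without colliding with the consumer.
 */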
static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
	return (struct req_msg *)(hba->dma_mem +
		hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_sgitem *table;

	nseg = scsi_dma_map(cmd);

	dst = (struct st_sgtable *)req->variable;

	ccb->sg_count = nseg;
	dst->sg_count = cpu_to_le16((u16)nseg);
	dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
	dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

	table = (struct st_sgitem *)(dst + 1);
	scsi_for_each_sg(cmd, sg, nseg, i) {
		table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
		table[i].addr = cpu_to_le64(sg_dma_address(sg));
		table[i].ctrl = SG_CF_64B | SG_CF_HOST;
	}
	table[--i].ctrl |= SG_CF_EOT;
static int stex_ss_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_ss_sgitem *table;

	nseg = scsi_dma_map(cmd);

	dst = (struct st_sgtable *)req->variable;

	ccb->sg_count = nseg;
	dst->sg_count = cpu_to_le16((u16)nseg);
	dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
	dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

	table = (struct st_ss_sgitem *)(dst + 1);
	scsi_for_each_sg(cmd, sg, nseg, i) {
		table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
		table[i].addr =
			cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
		table[i].addr_hi =
			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
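		/*
		 * The double 16-bit shift takes the high half of a
		 * dma_addr_t without triggering a shift-count warning (or
		 * undefined behaviour from >> 32) when dma_addr_t is only
		 * 32 bits wide.
		 */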
static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
	memset(p->base, 0, sizeof(u32)*6);
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_level = 0;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
	p->subid =
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
	struct scsi_cmnd *cmd;
	struct st_msg_header *msg_h;

	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	cmd = hba->ccb[tag].cmd;
	msg_h = (struct st_msg_header *)req - 1;

	msg_h->channel = (u8)cmd->device->channel;
	msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);

	addr = hba->dma_handle + hba->req_head * hba->rq_size;
	addr += (hba->ccb[tag].sg_count+4)/11;
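	/*
	 * Assumption: the per-request slot address is aligned enough that
	 * its low bits are free, and (sg_count+4)/11 packs a firmware-defined
	 * size hint into them; the exact contract is not documented in this
	 * file.
	 */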
	msg_h->handle = cpu_to_le64(addr);

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
	readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
	writel(addr, hba->mmio_base + YH2I_REQ);
	readl(hba->mmio_base + YH2I_REQ); /* flush */
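	/*
	 * Posting-sequence note: the 64-bit request address is split across
	 * two 32-bit registers, high half first; the read-backs flush the
	 * posted writes, and writing the low half last is assumed to be what
	 * kicks the firmware (the register semantics are not documented in
	 * this excerpt).
	 */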
static void return_abnormal_state(struct st_hba *hba, int status)
	spin_lock_irqsave(hba->host->host_lock, flags);
	for (tag = 0; tag < hba->host->can_queue; tag++) {
		ccb = &hba->ccb[tag];
		if (ccb->req == NULL)

		scsi_dma_unmap(ccb->cmd);
		ccb->cmd->result = status << 16;
		ccb->cmd->scsi_done(ccb->cmd);

	spin_unlock_irqrestore(hba->host->host_lock, flags);
stex_slave_config(struct scsi_device *sdev)
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
	struct Scsi_Host *host;
	unsigned int id, lun;

	host = cmd->device->host;
	id = cmd->device->id;
	lun = cmd->device->lun;
	hba = (struct st_hba *) &host->hostdata[0];
	if (hba->mu_status == MU_STATE_NOCONNECT) {
		cmd->result = DID_NO_CONNECT << 16;

	if (unlikely(hba->mu_status != MU_STATE_STARTED))
		return SCSI_MLQUEUE_HOST_BUSY;

	switch (cmd->cmnd[0]) {

		static char ms10_caching_page[12] =
			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };

		page = cmd->cmnd[2] & 0x3f;
		if (page == 0x8 || page == 0x3f) {
			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
				sizeof(ms10_caching_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;

		stex_invalid_field(cmd, done);

		/*
		 * The shasta firmware does not report actual LUNs in the
		 * target, so fail the command to force a sequential LUN
		 * scan. Also, the console device does not support this
		 * command.
		 */
		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
			stex_invalid_field(cmd, done);

	case TEST_UNIT_READY:
		if (id == host->max_id - 1) {
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;

		if (lun >= host->max_lun) {
			cmd->result = DID_NO_CONNECT << 16;

		if (id != host->max_id - 1)
			break;
		if (!lun && !cmd->device->channel &&
			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
				sizeof(console_inq_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;

		stex_invalid_field(cmd, done);

		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
			const struct st_drvver ver = {
				.major = ST_VER_MAJOR,
				.minor = ST_VER_MINOR,
				.build = ST_BUILD_VER,
				.signature[0] = PASSTHRU_SIGNATURE,
				.console_id = host->max_id - 1,
				.host_no = hba->host->host_no,
			};
			size_t cp_len = sizeof(ver);

			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
			cmd->result = sizeof(ver) == cp_len ?
				DID_OK << 16 | COMMAND_COMPLETE << 8 :
				DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	cmd->scsi_done = done;

	tag = cmd->request->tag;

	if (unlikely(tag >= host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	req = hba->alloc_rq(hba);

	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		req->data_dir = MSG_DATA_DIR_IN;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		req->data_dir = MSG_DATA_DIR_OUT;
	else
		req->data_dir = MSG_DATA_DIR_ND;

	hba->ccb[tag].cmd = cmd;
	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	hba->ccb[tag].sense_buffer = cmd->sense_buffer;

	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
		hba->ccb[tag].sg_count = 0;
		memset(&req->variable[0], 0, 8);
	}

	hba->send(hba, req, tag);
static DEF_SCSI_QCMD(stex_queuecommand)
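/*
 * DEF_SCSI_QCMD() generates the stex_queuecommand() wrapper that takes the
 * host lock around stex_queuecommand_lck(), preserving the legacy locked
 * ->queuecommand semantics this driver was written against.
 */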
static void stex_scsi_done(struct st_ccb *ccb)
	struct scsi_cmnd *cmd = ccb->cmd;

	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
		result = ccb->scsi_status;
		switch (ccb->scsi_status) {
		case SAM_STAT_GOOD:
			result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		case SAM_STAT_CHECK_CONDITION:
			result |= DRIVER_SENSE << 24;

			result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;

			result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	else if (ccb->srb_status & SRB_SEE_SENSE)
		result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
	else switch (ccb->srb_status) {
	case SRB_STATUS_SELECTION_TIMEOUT:
		result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
		break;
	case SRB_STATUS_BUSY:
		result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
		break;
	case SRB_STATUS_INVALID_REQUEST:
	case SRB_STATUS_ERROR:
	default:
		result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	cmd->result = result;
static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
	if (resp->scsi_status != SAM_STAT_GOOD) {
		if (ccb->sense_buffer != NULL)
			memcpy(ccb->sense_buffer, resp->variable,
				min(variable, ccb->sense_bufflen));

	if (ccb->cmd == NULL)
		return;
	scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
static void stex_check_cmd(struct st_hba *hba,
	struct st_ccb *ccb, struct status_msg *resp)
	if (ccb->cmd->cmnd[0] == MGT_CMD &&
		resp->scsi_status != SAM_STAT_CHECK_CONDITION)
		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
			le32_to_cpu(*(__le32 *)&resp->variable[0]));
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
	void __iomem *base = hba->mmio_base;
	struct status_msg *resp;

	if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))

	/* status payloads */
	hba->status_head = readl(base + OMR1);
	if (unlikely(hba->status_head > hba->sts_count)) {
		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
			pci_name(hba->pdev));

	/*
	 * A status payload is not valid if:
	 * 1. there are no pending requests (e.g. during the init stage), or
	 * 2. there are pending requests but the controller is in reset
	 *    status and its type is not st_yosemite.
	 * The st_yosemite firmware returns pending requests to the driver
	 * while in reset status, so those are allowed to pass.
	 */
	if (unlikely(hba->out_req_cnt <= 0 ||
		(hba->mu_status == MU_STATE_RESETTING &&
		 hba->cardtype != st_yosemite))) {
		hba->status_tail = hba->status_head;

	while (hba->status_tail != hba->status_head) {
		resp = stex_get_status(hba);
		tag = le16_to_cpu(resp->tag);
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));

		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));

		size = resp->payload_sz * sizeof(u32); /* payload size */
		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
			size > sizeof(*resp))) {
			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
				pci_name(hba->pdev));
		} else {
			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
			if (size)
				stex_copy_data(ccb, resp, size);
		}

		ccb->srb_status = resp->srb_status;
		ccb->scsi_status = resp->scsi_status;

		if (likely(ccb->cmd != NULL)) {
			if (hba->cardtype == st_yosemite)
				stex_check_cmd(hba, ccb, resp);

			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
				stex_controller_info(hba, ccb);

			scsi_dma_unmap(ccb->cmd);

	writel(hba->status_head, base + IMR1);
	readl(base + IMR1); /* flush */
static irqreturn_t stex_intr(int irq, void *__hba)
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + ODBL);

	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
			hba->cardtype == st_shasta))
			queue_work(hba->work_q, &hba->reset_work);

	spin_unlock_irqrestore(hba->host->host_lock, flags);
static void stex_ss_mu_intr(struct st_hba *hba)
	struct status_msg *resp;

	if (unlikely(hba->out_req_cnt <= 0 ||
		hba->mu_status == MU_STATE_RESETTING))

	while (count < hba->sts_count) {
		scratch = hba->scratch + hba->status_tail;
		value = le32_to_cpu(*scratch);
		if (unlikely(!(value & SS_STS_NORMAL)))
			break;

		resp = hba->status_buffer + hba->status_tail;

		hba->status_tail %= hba->sts_count+1;
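		/*
		 * Assumption: each status slot pairs with a 32-bit scratch
		 * word; its SS_STS_* flags gate consumption and its low bits
		 * carry the completing request's tag (the tag extraction is
		 * elided in this excerpt).
		 */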
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));

		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));

		if (likely(value & SS_STS_DONE)) { /* normal case */
			ccb->srb_status = SRB_STATUS_SUCCESS;
			ccb->scsi_status = SAM_STAT_GOOD;
		} else {
			ccb->srb_status = resp->srb_status;
			ccb->scsi_status = resp->scsi_status;
			size = resp->payload_sz * sizeof(u32);
			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
				size > sizeof(*resp))) {
				printk(KERN_WARNING DRV_NAME
					"(%s): bad status size\n",
					pci_name(hba->pdev));
			} else {
				size -= sizeof(*resp) - STATUS_VAR_LEN;
				if (size)
					stex_copy_data(ccb, resp, size);
			}

			if (likely(ccb->cmd != NULL))
				stex_check_cmd(hba, ccb, resp);
		}

		if (likely(ccb->cmd != NULL)) {
			scsi_dma_unmap(ccb->cmd);
static irqreturn_t stex_ss_intr(int irq, void *__hba)
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + YI2H_INT);
	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (unlikely(data & SS_I2H_REQUEST_RESET))
			queue_work(hba->work_q, &hba->reset_work);

	spin_unlock_irqrestore(hba->host->host_lock, flags);
static int stex_common_handshake(struct st_hba *hba)
	void __iomem *base = hba->mmio_base;
	struct handshake_frame *h;
	dma_addr_t status_phys;
	unsigned long before;

	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);

		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no handshake signature\n",
					pci_name(hba->pdev));

	data = readl(base + OMR1);
	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
		data &= 0x0000ffff;
		if (hba->host->can_queue > data) {
			hba->host->can_queue = data;
			hba->host->cmd_per_lun = data;

	h = (struct handshake_frame *)hba->status_buffer;
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
	h->partner_type = HMU_PARTNER_TYPE;
	if (hba->extra_offset) {
		h->extra_offset = cpu_to_le32(hba->extra_offset);
		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
	} else
		h->extra_offset = h->extra_size = 0;

	status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
	writel(status_phys, base + IMR0);
	readl(base + IMR0); /* flush */
	writel((status_phys >> 16) >> 16, base + IMR1);

	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
	readl(base + OMR0); /* flush */
	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
	readl(base + IDBL); /* flush */

	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));

	writel(0, base + IMR0);
	readl(base + IMR0);
	writel(0, base + OMR0);
	readl(base + OMR0);
	writel(0, base + IMR1);
	readl(base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */
static int stex_ss_handshake(struct st_hba *hba)
	void __iomem *base = hba->mmio_base;
	struct st_msg_header *msg_h;
	struct handshake_frame *h;
	u32 data, scratch_size;
	unsigned long before;

	while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): firmware not operational\n",
				pci_name(hba->pdev));

	msg_h = (struct st_msg_header *)hba->dma_mem;
	msg_h->handle = cpu_to_le64(hba->dma_handle);
	msg_h->flag = SS_HEAD_HANDSHAKE;

	h = (struct handshake_frame *)(msg_h + 1);
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
	h->partner_type = HMU_PARTNER_TYPE;
	h->extra_offset = h->extra_size = 0;
	scratch_size = (hba->sts_count+1)*sizeof(u32);
	h->scratch_size = cpu_to_le32(scratch_size);

	data = readl(base + YINT_EN);

	writel(data, base + YINT_EN);
	writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
	readl(base + YH2I_REQ_HI);
	writel(hba->dma_handle, base + YH2I_REQ);
	readl(base + YH2I_REQ); /* flush */

	scratch = hba->scratch;
	before = jiffies;
	while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));

	memset(scratch, 0, scratch_size);
static int stex_handshake(struct st_hba *hba)
	unsigned long flags;
	unsigned int mu_status;

	err = (hba->cardtype == st_yel) ?
		stex_ss_handshake(hba) : stex_common_handshake(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	mu_status = hba->mu_status;

	if (err == 0) {
		hba->status_head = 0;
		hba->status_tail = 0;
		hba->out_req_cnt = 0;
		hba->mu_status = MU_STATE_STARTED;
	} else
		hba->mu_status = MU_STATE_FAILED;
	if (mu_status == MU_STATE_RESETTING)
		wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
static int stex_abort(struct scsi_cmnd *cmd)
	struct Scsi_Host *host = cmd->device->host;
	struct st_hba *hba = (struct st_hba *)host->hostdata;
	u16 tag = cmd->request->tag;

	int result = SUCCESS;
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "aborting command\n");

	base = hba->mmio_base;
	spin_lock_irqsave(host->host_lock, flags);
	if (tag < host->can_queue &&
		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
		hba->wait_ccb = &hba->ccb[tag];

	if (hba->cardtype == st_yel) {
		data = readl(base + YI2H_INT);
		if (data == 0 || data == 0xffffffff)

		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
	} else {
		data = readl(base + ODBL);
		if (data == 0 || data == 0xffffffff)

		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */

		stex_mu_intr(hba, data);
	}
	if (hba->wait_ccb == NULL) {
		printk(KERN_WARNING DRV_NAME
			"(%s): lost interrupt\n", pci_name(hba->pdev));

	scsi_dma_unmap(cmd);
	hba->wait_ccb->req = NULL; /* nullify the req's future return */
	hba->wait_ccb = NULL;

	spin_unlock_irqrestore(host->host_lock, flags);
static void stex_hard_reset(struct st_hba *hba)
	struct pci_bus *bus;

	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/*
	 * Reset the secondary bus. Our controller (MU/ATU) is the only
	 * device on the secondary bus; consult the Intel 80331/3
	 * developer's manual for details.
	 */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/*
	 * 1 ms may be enough for 8-port controllers, but 16-port controllers
	 * need more time to finish a bus reset. Use 100 ms here for safety.
	 */

	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))

	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);
static int stex_yos_reset(struct st_hba *hba)
	unsigned long flags, before;

	base = hba->mmio_base;
	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
	readl(base + IDBL); /* flush */
	before = jiffies;
	while (hba->out_req_cnt > 0) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): reset timeout\n", pci_name(hba->pdev));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret == -1)
		hba->mu_status = MU_STATE_FAILED;
	else
		hba->mu_status = MU_STATE_STARTED;
	wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
static void stex_ss_reset(struct st_hba *hba)
	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
	readl(hba->mmio_base + YH2I_INT);
static int stex_do_reset(struct st_hba *hba)
	unsigned long flags;
	unsigned int mu_status = MU_STATE_RESETTING;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->mu_status == MU_STATE_STARTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
			pci_name(hba->pdev));

	while (hba->mu_status == MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		wait_event_timeout(hba->reset_waitq,
			hba->mu_status != MU_STATE_RESETTING,
			MU_MAX_DELAY * HZ);
		spin_lock_irqsave(hba->host->host_lock, flags);
		mu_status = hba->mu_status;
	}

	if (mu_status != MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return (mu_status == MU_STATE_STARTED) ? 0 : -1;
	}

	hba->mu_status = MU_STATE_RESETTING;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (hba->cardtype == st_yosemite)
		return stex_yos_reset(hba);

	if (hba->cardtype == st_shasta)
		stex_hard_reset(hba);
	else if (hba->cardtype == st_yel)
		stex_ss_reset(hba);

	return_abnormal_state(hba, DID_RESET);

	if (stex_handshake(hba) == 0)
		return 0;

	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
		pci_name(hba->pdev));
static int stex_reset(struct scsi_cmnd *cmd)
	hba = (struct st_hba *) &cmd->device->host->hostdata[0];

	shost_printk(KERN_INFO, cmd->device->host,
		"resetting host\n");

	return stex_do_reset(hba) ? FAILED : SUCCESS;
static void stex_reset_work(struct work_struct *work)
	struct st_hba *hba = container_of(work, struct st_hba, reset_work);

	stex_do_reset(hba);
static int stex_biosparam(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int geom[])
	int heads = 255, sectors = 63;

	if (capacity < 0x200000) {

	sector_div(capacity, heads * sectors);
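	/*
	 * 0x200000 512-byte sectors is exactly 1 GiB; disks below that get
	 * a smaller legacy geometry (the alternate heads/sectors values are
	 * elided here) before sector_div() reduces capacity to the cylinder
	 * count.
	 */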
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.proc_name = DRV_NAME,
	.bios_param = stex_biosparam,
	.queuecommand = stex_queuecommand,
	.slave_configure = stex_slave_config,
	.eh_abort_handler = stex_abort,
	.eh_host_reset_handler = stex_reset,
static struct pci_device_id stex_pci_tbl[] = {
	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX12350 */
	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX4350 */
	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX24350 */

	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },

	{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },

	{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },

	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
	{ }	/* terminate list */
static struct st_card_info stex_card_info[] = {
		.alloc_rq = stex_alloc_req,
		.map_sg = stex_map_sg,
		.send = stex_send_cmd,

		.alloc_rq = stex_alloc_req,
		.map_sg = stex_map_sg,
		.send = stex_send_cmd,

		.alloc_rq = stex_alloc_req,
		.map_sg = stex_map_sg,
		.send = stex_send_cmd,

		.alloc_rq = stex_alloc_req,
		.map_sg = stex_map_sg,
		.send = stex_send_cmd,

		.alloc_rq = stex_ss_alloc_req,
		.map_sg = stex_ss_map_sg,
		.send = stex_ss_send_cmd,
static int stex_set_dma_mask(struct pci_dev *pdev)
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
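/*
 * Note: on current kernels the 64-bit-then-32-bit fallback above is usually
 * expressed as a single dma_set_mask_and_coherent(&pdev->dev,
 * DMA_BIT_MASK(64)) call, retried with DMA_BIT_MASK(32) if it fails.
 */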
static int stex_request_irq(struct st_hba *hba)
	struct pci_dev *pdev = hba->pdev;

	if (msi) {
		status = pci_enable_msi(pdev);
		if (status != 0)
			printk(KERN_ERR DRV_NAME
				"(%s): error %d setting up MSI\n",
				pci_name(pdev), status);
		else
			hba->msi_enabled = 1;
	} else
		hba->msi_enabled = 0;

	status = request_irq(pdev->irq, hba->cardtype == st_yel ?
		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);

		if (hba->msi_enabled)
			pci_disable_msi(pdev);

static void stex_free_irq(struct st_hba *hba)
	struct pci_dev *pdev = hba->pdev;

	free_irq(pdev->irq, hba);
	if (hba->msi_enabled)
		pci_disable_msi(pdev);
static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	struct Scsi_Host *host;
	const struct st_card_info *ci = NULL;
	u32 sts_offset, cp_offset, scratch_offset;

	err = pci_enable_device(pdev);

	pci_set_master(pdev);

	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));

		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",

	hba = (struct st_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct st_hba));

	err = pci_request_regions(pdev, DRV_NAME);

		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
			pci_name(pdev));
		goto out_scsi_host_put;

	hba->mmio_base = pci_ioremap_bar(pdev, 0);
	if (!hba->mmio_base) {
		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",

		goto out_release_regions;

	err = stex_set_dma_mask(pdev);

		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",

	hba->cardtype = (unsigned int) id->driver_data;
	ci = &stex_card_info[hba->cardtype];
	switch (id->subdevice) {

	if (hba->cardtype == st_yel)
		hba->supports_pm = 1;

	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
	if (hba->cardtype == st_yel)
		sts_offset += (ci->sts_count+1) * sizeof(u32);
	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
	hba->dma_size = cp_offset + sizeof(struct st_frame);
	if (hba->cardtype == st_seq ||
		(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
		hba->extra_offset = hba->dma_size;
		hba->dma_size += ST_ADDITIONAL_MEM;
	}
	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
	if (!hba->dma_mem) {
		/* Retry minimum coherent mapping for st_seq and st_vsc */
		if (hba->cardtype == st_seq ||
			(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
			printk(KERN_WARNING DRV_NAME
				"(%s): allocating min buffer for controller\n",
				pci_name(pdev));
			hba->dma_size = hba->extra_offset
				+ ST_ADDITIONAL_MEM_MIN;
			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
				hba->dma_size, &hba->dma_handle, GFP_KERNEL);

	if (!hba->dma_mem) {
		err = -ENOMEM;
		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",

	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);

		printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",

	if (hba->cardtype == st_yel)
		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
	hba->copy_buffer = hba->dma_mem + cp_offset;
	hba->rq_count = ci->rq_count;
	hba->rq_size = ci->rq_size;
	hba->sts_count = ci->sts_count;
	hba->alloc_rq = ci->alloc_rq;
	hba->map_sg = ci->map_sg;
	hba->send = ci->send;
	hba->mu_status = MU_STATE_STARTING;

	if (hba->cardtype == st_yel)
		host->sg_tablesize = 38;
	else
		host->sg_tablesize = 32;
	host->can_queue = ci->rq_count;
	host->cmd_per_lun = ci->rq_count;
	host->max_id = ci->max_id;
	host->max_lun = ci->max_lun;
	host->max_channel = ci->max_channel;
	host->unique_id = host->host_no;
	host->max_cmd_len = STEX_CDB_LENGTH;

	init_waitqueue_head(&hba->reset_waitq);

	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
		"stex_wq_%d", host->host_no);
	hba->work_q = create_singlethread_workqueue(hba->work_q_name);

		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",

	INIT_WORK(&hba->reset_work, stex_reset_work);

	err = stex_request_irq(hba);

		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",

	err = stex_handshake(hba);

	pci_set_drvdata(pdev, hba);

	err = scsi_add_host(host, &pdev->dev);

		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",

	scsi_scan_host(host);

	destroy_workqueue(hba->work_q);

	dma_free_coherent(&pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);

	iounmap(hba->mmio_base);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);

	pci_disable_device(pdev);
static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
	struct req_msg *req;
	struct st_msg_header *msg_h;
	unsigned long flags;
	unsigned long before;

	spin_lock_irqsave(hba->host->host_lock, flags);

	if (hba->cardtype == st_yel && hba->supports_pm == 1) {
		if (st_sleep_mic == ST_NOTHANDLED) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			return;
		}
	}
	req = hba->alloc_rq(hba);
	if (hba->cardtype == st_yel) {
		msg_h = (struct st_msg_header *)req - 1;
		memset(msg_h, 0, hba->rq_size);
	} else
		memset(req, 0, hba->rq_size);

	if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel)
		&& st_sleep_mic == ST_IGNORED) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = CTLR_SHUTDOWN;
	} else if (hba->cardtype == st_yel && st_sleep_mic != ST_IGNORED) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = PMIC_SHUTDOWN;
		req->cdb[4] = st_sleep_mic;
	} else {
		req->cdb[0] = CONTROLLER_CMD;
		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
		req->cdb[2] = CTLR_POWER_SAVING;
	}

	hba->ccb[tag].cmd = NULL;
	hba->ccb[tag].sg_count = 0;
	hba->ccb[tag].sense_bufflen = 0;
	hba->ccb[tag].sense_buffer = NULL;
	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;

	hba->send(hba, req, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			hba->ccb[tag].req_type = 0;
			hba->mu_status = MU_STATE_STOP;

	hba->mu_status = MU_STATE_STOP;
static void stex_hba_free(struct st_hba *hba)
	destroy_workqueue(hba->work_q);

	iounmap(hba->mmio_base);

	pci_release_regions(hba->pdev);

	kfree(hba->ccb);

	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);
static void stex_remove(struct pci_dev *pdev)
	struct st_hba *hba = pci_get_drvdata(pdev);

	hba->mu_status = MU_STATE_NOCONNECT;
	return_abnormal_state(hba, DID_NO_CONNECT);
	scsi_remove_host(hba->host);

	scsi_block_requests(hba->host);

	stex_hba_free(hba);

	scsi_host_put(hba->host);

	pci_disable_device(pdev);
static void stex_shutdown(struct pci_dev *pdev)
	struct st_hba *hba = pci_get_drvdata(pdev);

	if (hba->supports_pm == 0)
		stex_hba_stop(hba, ST_IGNORED);
	else
		stex_hba_stop(hba, ST_S5);
static int stex_choice_sleep_mic(pm_message_t state)
	switch (state.event) {
	case PM_EVENT_SUSPEND:
		return ST_S3;
	case PM_EVENT_HIBERNATE:
		return ST_S4;
	default:
		return ST_NOTHANDLED;
static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
	struct st_hba *hba = pci_get_drvdata(pdev);

	if (hba->cardtype == st_yel && hba->supports_pm == 1)
		stex_hba_stop(hba, stex_choice_sleep_mic(state));
	else
		stex_hba_stop(hba, ST_IGNORED);
static int stex_resume(struct pci_dev *pdev)
	struct st_hba *hba = pci_get_drvdata(pdev);

	hba->mu_status = MU_STATE_STARTING;
	stex_handshake(hba);
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);

static struct pci_driver stex_pci_driver = {
	.name = DRV_NAME,
	.id_table = stex_pci_tbl,
	.probe = stex_probe,
	.remove = stex_remove,
	.shutdown = stex_shutdown,
	.suspend = stex_suspend,
	.resume = stex_resume,
static int __init stex_init(void)
	printk(KERN_INFO DRV_NAME
		": Promise SuperTrak EX Driver version: %s\n",
		ST_DRIVER_VERSION);

	return pci_register_driver(&stex_pci_driver);

static void __exit stex_exit(void)
	pci_unregister_driver(&stex_pci_driver);

module_init(stex_init);
module_exit(stex_exit);