/*
 * Linux driver for VMware's para-virtualized SCSI HBA.
 *
 * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by: Jim Gill <jgill@vmware.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#include "vmw_pvscsi.h"

#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"

MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
MODULE_AUTHOR("VMware, Inc.");
MODULE_LICENSE("GPL");
MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);

#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8
#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1
#define PVSCSI_DEFAULT_QUEUE_DEPTH		254
#define SGL_SIZE				PAGE_SIZE

struct pvscsi_sg_list {
	struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
};

struct pvscsi_ctx {
	/*
	 * The index of the context in the cmd_map array serves as the
	 * context ID, giving a 1-to-1 mapping of completions back to
	 * requests.
	 */
	struct scsi_cmnd	*cmd;
	struct pvscsi_sg_list	*sgl;
	struct list_head	list;
	dma_addr_t		dataPA;
	dma_addr_t		sensePA;
	dma_addr_t		sglPA;
	struct completion	*abort_cmp;
};

struct pvscsi_adapter {
	char				*mmioBase;
	unsigned int			irq;
	u8				rev;
	bool				use_msi;
	bool				use_msix;
	bool				use_msg;
	bool				use_req_threshold;

	spinlock_t			hw_lock;

	struct workqueue_struct		*workqueue;
	struct work_struct		work;

	struct PVSCSIRingReqDesc	*req_ring;
	unsigned			req_pages;
	unsigned			req_depth;
	dma_addr_t			reqRingPA;

	struct PVSCSIRingCmpDesc	*cmp_ring;
	unsigned			cmp_pages;
	dma_addr_t			cmpRingPA;

	struct PVSCSIRingMsgDesc	*msg_ring;
	unsigned			msg_pages;
	dma_addr_t			msgRingPA;

	struct PVSCSIRingsState		*rings_state;
	dma_addr_t			ringStatePA;

	struct pci_dev			*dev;
	struct Scsi_Host		*host;

	struct list_head		cmd_pool;
	struct pvscsi_ctx		*cmd_map;
};

/* Command line parameters */
static int pvscsi_ring_pages;
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg       = true;
static bool pvscsi_use_req_threshold = true;

#define PVSCSI_RW (S_IRUSR | S_IWUSR)

module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
		 "[up to 16 targets],"
		 __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
		 "[for 16+ targets])");

module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");

module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");

module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");

module_param_named(use_req_threshold, pvscsi_use_req_threshold,
		   bool, PVSCSI_RW);
MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");

static const struct pci_device_id pvscsi_pci_tbl[] = {
	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);

static struct device *
pvscsi_dev(const struct pvscsi_adapter *adapter)
{
	return &(adapter->dev->dev);
}

static struct pvscsi_ctx *
pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx, *end;

	end = &adapter->cmd_map[adapter->req_depth];
	for (ctx = adapter->cmd_map; ctx < end; ctx++)
		if (ctx->cmd == cmd)
			return ctx;

	return NULL;
}

static struct pvscsi_ctx *
pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx;

	if (list_empty(&adapter->cmd_pool))
		return NULL;

	ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
	ctx->cmd = cmd;
	list_del(&ctx->list);

	return ctx;
}

static void pvscsi_release_context(struct pvscsi_adapter *adapter,
				   struct pvscsi_ctx *ctx)
{
	ctx->cmd = NULL;
	ctx->abort_cmp = NULL;
	list_add(&ctx->list, &adapter->cmd_pool);
}

/*
 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
 * non-zero integer. ctx always points to an entry in the cmd_map array,
 * hence the return value is always >= 1.
 */
static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
			      const struct pvscsi_ctx *ctx)
{
	return ctx - adapter->cmd_map + 1;
}

static struct pvscsi_ctx *
pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
{
	return &adapter->cmd_map[context - 1];
}

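/*
 * Note: the device echoes the context value back in each completion
 * descriptor (e->context), and pvscsi_get_context() inverts the mapping
 * to recover the cmd_map entry - this is how completions are matched to
 * their originating requests.
 */
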
static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
			     u32 offset, u32 val)
{
	writel(val, adapter->mmioBase + offset);
}

static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
{
	return readl(adapter->mmioBase + offset);
}

static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
{
	return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
}

static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
				     u32 val)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
{
	u32 intr_bits;

	intr_bits = PVSCSI_INTR_CMPL_MASK;
	if (adapter->use_msg)
		intr_bits |= PVSCSI_INTR_MSG_MASK;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
}

static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
				  u32 cmd, const void *desc, size_t len)
{
	const u32 *ptr = desc;
	size_t i;

	len /= sizeof(*ptr);
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
	for (i = 0; i < len; i++)
		pvscsi_reg_write(adapter,
				 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
}

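/*
 * As the loop above shows, a command is issued to the device by writing
 * the opcode to the COMMAND register and then streaming the descriptor
 * payload, 32 bits at a time, into the COMMAND_DATA register.
 */
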
static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
			     const struct pvscsi_ctx *ctx)
{
	struct PVSCSICmdDescAbortCmd cmd = { 0 };

	cmd.target = ctx->cmd->device->id;
	cmd.context = pvscsi_map_context(adapter, ctx);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
}

static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}

static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}

static int scsi_is_rw(unsigned char op)
{
	return op == READ_6  || op == WRITE_6  ||
	       op == READ_10 || op == WRITE_10 ||
	       op == READ_12 || op == WRITE_12 ||
	       op == READ_16 || op == WRITE_16;
}

static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
			   unsigned char op)
{
	if (scsi_is_rw(op)) {
		struct PVSCSIRingsState *s = adapter->rings_state;

		if (!adapter->use_req_threshold ||
		    s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
			pvscsi_kick_rw_io(adapter);
	} else {
		pvscsi_process_request_ring(adapter);
	}
}

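/*
 * The threshold test above is what implements request coalescing for R/W
 * I/O: when use_req_threshold is on, the device is only kicked once enough
 * requests (reqCallThreshold) are outstanding, cutting down on expensive
 * VM exits. Non-R/W commands always kick the device immediately.
 */
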
static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
{
	dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}

static void ll_bus_reset(const struct pvscsi_adapter *adapter)
{
	dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
}

static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
{
	struct PVSCSICmdDescResetDevice cmd = { 0 };

	dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);

	cmd.target = target;

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
			      &cmd, sizeof(cmd));
}

static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
			     struct scatterlist *sg, unsigned count)
{
	unsigned i;
	struct PVSCSISGElement *sge;

	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);

	sge = &ctx->sgl->sge[0];
	for (i = 0; i < count; i++, sg++) {
		sge[i].addr   = sg_dma_address(sg);
		sge[i].length = sg_dma_len(sg);
		sge[i].flags  = 0;
	}
}

/*
 * Map all data buffers for a command into PCI space and
 * setup the scatter/gather list if needed.
 */
static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			      struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			      struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return 0;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);

		if (segs == -ENOMEM) {
			scmd_printk(KERN_ERR, cmd,
				    "vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
			return -ENOMEM;
		} else if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
				scmd_printk(KERN_ERR, cmd,
					    "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
				scsi_dma_unmap(cmd);
				ctx->sglPA = 0;
				return -ENOMEM;
			}
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
			scmd_printk(KERN_ERR, cmd,
				    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
			return -ENOMEM;
		}
		e->dataAddr = ctx->dataPA;
	}

	return 0;
}

static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
				 struct pvscsi_ctx *ctx)
{
	struct scsi_cmnd *cmd;
	unsigned bufflen;

	cmd = ctx->cmd;
	bufflen = scsi_bufflen(cmd);

	if (bufflen != 0) {
		unsigned count = scsi_sg_count(cmd);

		if (count != 0) {
			scsi_dma_unmap(cmd);
			if (ctx->sglPA) {
				pci_unmap_single(adapter->dev, ctx->sglPA,
						 SGL_SIZE, PCI_DMA_TODEVICE);
				ctx->sglPA = 0;
			}
		} else
			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
					 cmd->sc_data_direction);
	}
	if (cmd->sense_buffer)
		pci_unmap_single(adapter->dev, ctx->sensePA,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
						    &adapter->ringStatePA);
	if (!adapter->rings_state)
		return -ENOMEM;

	adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
				 pvscsi_ring_pages);
	adapter->req_depth = adapter->req_pages
				* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	adapter->req_ring = pci_alloc_consistent(adapter->dev,
						 adapter->req_pages * PAGE_SIZE,
						 &adapter->reqRingPA);
	if (!adapter->req_ring)
		return -ENOMEM;

	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
				 pvscsi_ring_pages);
	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
						 adapter->cmp_pages * PAGE_SIZE,
						 &adapter->cmpRingPA);
	if (!adapter->cmp_ring)
		return -ENOMEM;

	BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

	if (!adapter->use_msg)
		return 0;

	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
				 pvscsi_msg_ring_pages);
	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
						 adapter->msg_pages * PAGE_SIZE,
						 &adapter->msgRingPA);
	if (!adapter->msg_ring)
		return -ENOMEM;
	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));

	return 0;
}

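/*
 * All ring allocations above use DMA-coherent, page-sized memory: the
 * device is handed page frame numbers (PPNs, i.e. addresses shifted by
 * PAGE_SHIFT) in PVSCSI_CMD_SETUP_RINGS below, so anything not
 * page-aligned would be unaddressable - hence the IS_ALIGNED() BUG_ONs.
 */
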
static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
{
	struct PVSCSICmdDescSetupRings cmd = { 0 };
	dma_addr_t base;
	unsigned i;

	cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
	cmd.reqRingNumPages = adapter->req_pages;
	cmd.cmpRingNumPages = adapter->cmp_pages;

	base = adapter->reqRingPA;
	for (i = 0; i < adapter->req_pages; i++) {
		cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	base = adapter->cmpRingPA;
	for (i = 0; i < adapter->cmp_pages; i++) {
		cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	memset(adapter->rings_state, 0, PAGE_SIZE);
	memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
	memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
			      &cmd, sizeof(cmd));

	if (adapter->use_msg) {
		struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };

		cmd_msg.numPages = adapter->msg_pages;

		base = adapter->msgRingPA;
		for (i = 0; i < adapter->msg_pages; i++) {
			cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
			base += PAGE_SIZE;
		}
		memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);

		pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
				      &cmd_msg, sizeof(cmd_msg));
	}
}

static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}

/*
 * Pull a completion descriptor off and pass the completion back
 * to the SCSI mid layer.
 */
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
				    const struct PVSCSIRingCmpDesc *e)
{
	struct pvscsi_ctx *ctx;
	struct scsi_cmnd *cmd;
	struct completion *abort_cmp;
	u32 btstat = e->hostStatus;
	u32 sdstat = e->scsiStatus;

	ctx = pvscsi_get_context(adapter, e->context);
	cmd = ctx->cmd;
	abort_cmp = ctx->abort_cmp;
	pvscsi_unmap_buffers(adapter, ctx);
	pvscsi_release_context(adapter, ctx);
	if (abort_cmp) {
		/*
		 * The command was requested to be aborted. Just signal that
		 * the request completed and swallow the actual cmd completion
		 * here. The abort handler will post a completion for this
		 * command indicating that it got successfully aborted.
		 */
		complete(abort_cmp);
		return;
	}

	cmd->result = 0;
	if (sdstat != SAM_STAT_GOOD &&
	    (btstat == BTSTAT_SUCCESS ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
		if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
			cmd->result = (DID_RESET << 16);
		} else {
			cmd->result = (DID_OK << 16) | sdstat;
			if (sdstat == SAM_STAT_CHECK_CONDITION &&
			    cmd->sense_buffer)
				cmd->result |= (DRIVER_SENSE << 24);
		}
	} else
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			/*
			 * Commands like INQUIRY may transfer less data than
			 * requested by the initiator via bufflen. Set residual
			 * count to make the upper layer aware of the actual
			 * amount of data returned. There are cases when the
			 * controller returns zero dataLen with non-zero data
			 * - do not set residual count in that case.
			 */
			if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
				scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
			cmd->result = (DID_OK << 16);
			break;

		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
			/* Report residual data in underruns */
			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
			cmd->result = (DID_ERROR << 16);
			break;

		case BTSTAT_SELTIMEO:
			/* Our emulation returns this for non-connected devs */
			cmd->result = (DID_BAD_TARGET << 16);
			break;

		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_BADMSG:
			cmd->result = (DRIVER_INVALID << 24);
			/* fall through */

		case BTSTAT_HAHARDWARE:
		case BTSTAT_INVPHASE:
		case BTSTAT_HATIMEOUT:
		case BTSTAT_NORESPONSE:
		case BTSTAT_DISCONNECT:
		case BTSTAT_HASOFTWARE:
		case BTSTAT_BUSFREE:
		case BTSTAT_SENSFAILED:
			cmd->result |= (DID_ERROR << 16);
			break;

		case BTSTAT_SGOVERRUN:
		case BTSTAT_BUSRESET:
			cmd->result = (DID_RESET << 16);
			break;

		case BTSTAT_ABORTQUEUE:
			cmd->result = (DID_ABORT << 16);
			break;

		case BTSTAT_SCSIPARITY:
			cmd->result = (DID_PARITY << 16);
			break;

		default:
			cmd->result = (DID_ERROR << 16);
			scmd_printk(KERN_DEBUG, cmd,
				    "Unknown completion status: 0x%x\n",
				    btstat);
		}

	dev_dbg(&cmd->device->sdev_gendev,
		"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
		cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

	cmd->scsi_done(cmd);
}

/*
 * barrier usage : Since the PVSCSI device is emulated, there could be cases
 * where we may want to serialize some accesses between the driver and the
 * emulation layer. We use compiler barriers instead of the more expensive
 * memory barriers because PVSCSI is only supported on x86, which has strong
 * memory access ordering.
 */
static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
	u32 cmp_entries = s->cmpNumEntriesLog2;

	while (s->cmpConsIdx != s->cmpProdIdx) {
		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
						      MASK(cmp_entries));
		/*
		 * This barrier() ensures that *e is not dereferenced while
		 * the device emulation still writes data into the slot.
		 * Since the device emulation advances s->cmpProdIdx only after
		 * updating the slot we want to check it first.
		 */
		barrier();
		pvscsi_complete_request(adapter, e);
		/*
		 * This barrier() ensures that the compiler doesn't reorder the
		 * write to s->cmpConsIdx before the read of (*e) inside
		 * pvscsi_complete_request. Otherwise, device emulation may
		 * overwrite *e before we had a chance to read it.
		 */
		barrier();
		s->cmpConsIdx++;
	}
}

/*
 * Translate a Linux SCSI request into a request ring entry.
 */
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
	struct PVSCSIRingsState *s;
	struct PVSCSIRingReqDesc *e;
	struct scsi_device *sdev;
	u32 req_entries;

	s = adapter->rings_state;
	sdev = cmd->device;
	req_entries = s->reqNumEntriesLog2;

	/*
	 * If this condition holds, we might have room on the request ring, but
	 * we might not have room on the completion ring for the response.
	 * However, we have already ruled out this possibility - we would not
	 * have successfully allocated a context if it were true, since we only
	 * have one context per request entry. Check for it anyway, since it
	 * would be a serious bug.
	 */
	if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
		scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
			    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
			    s->reqProdIdx, s->cmpConsIdx);
		return -1;
	}

	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

	e->bus    = sdev->channel;
	e->target = sdev->id;
	memset(e->lun, 0, sizeof(e->lun));
	e->lun[1] = sdev->lun;

	if (cmd->sense_buffer) {
		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
			scmd_printk(KERN_ERR, cmd,
				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
			ctx->sensePA = 0;
			return -ENOMEM;
		}
		e->senseAddr = ctx->sensePA;
		e->senseLen = SCSI_SENSE_BUFFERSIZE;
	} else {
		e->senseLen  = 0;
		e->senseAddr = 0;
	}
	e->cdbLen   = cmd->cmd_len;
	e->vcpuHint = smp_processor_id();
	memcpy(e->cdb, cmd->cmnd, e->cdbLen);

	e->tag = SIMPLE_QUEUE_TAG;

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
	else if (cmd->sc_data_direction == DMA_NONE)
		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
	else
		e->flags = 0;

	if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
		if (cmd->sense_buffer) {
			pci_unmap_single(adapter->dev, ctx->sensePA,
					 SCSI_SENSE_BUFFERSIZE,
					 PCI_DMA_FROMDEVICE);
			ctx->sensePA = 0;
		}
		return -ENOMEM;
	}

	e->context = pvscsi_map_context(adapter, ctx);

	barrier();

	s->reqProdIdx++;

	return 0;
}

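/*
 * The barrier() before the reqProdIdx increment above keeps the compiler
 * from publishing the new producer index before the descriptor fields have
 * been written; per the comment ahead of pvscsi_process_completion_ring(),
 * a compiler barrier is considered sufficient on x86's strong memory model.
 */
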
static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;
	unsigned char op;

	spin_lock_irqsave(&adapter->hw_lock, flags);

	ctx = pvscsi_acquire_context(adapter, cmd);
	if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
		if (ctx)
			pvscsi_release_context(adapter, ctx);
		spin_unlock_irqrestore(&adapter->hw_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmd->scsi_done = done;
	op = cmd->cmnd[0];

	dev_dbg(&cmd->device->sdev_gendev,
		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	pvscsi_kick_io(adapter, op);

	return 0;
}

static DEF_SCSI_QCMD(pvscsi_queue)

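/*
 * DEF_SCSI_QCMD() generates the pvscsi_queue() entry point used in the host
 * template below: it invokes pvscsi_queue_lck() with the SCSI host lock held
 * and passes cmd->scsi_done as the completion callback.
 */
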
static int pvscsi_abort(struct scsi_cmnd *cmd)
{
	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;
	int result = SUCCESS;
	DECLARE_COMPLETION_ONSTACK(abort_cmp);
	int done;

	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
		    adapter->host->host_no, cmd);

	spin_lock_irqsave(&adapter->hw_lock, flags);

	/*
	 * Poll the completion ring first - we might be trying to abort
	 * a command that is waiting to be dispatched in the completion ring.
	 */
	pvscsi_process_completion_ring(adapter);

	/*
	 * If there is no context for the command, it either already succeeded
	 * or else was never properly issued. Not our problem.
	 */
	ctx = pvscsi_find_context(adapter, cmd);
	if (!ctx) {
		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
		goto out;
	}

	/*
	 * Mark that the command has been requested to be aborted and issue
	 * the abort.
	 */
	ctx->abort_cmp = &abort_cmp;

	pvscsi_abort_cmd(adapter, ctx);
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	/* Wait for 2 secs for the completion. */
	done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
	spin_lock_irqsave(&adapter->hw_lock, flags);

	if (!done) {
		/*
		 * Failed to abort the command, unmark the fact that it
		 * was requested to be aborted.
		 */
		ctx->abort_cmp = NULL;
		result = FAILED;
		scmd_printk(KERN_DEBUG, cmd,
			    "Failed to get completion for aborted cmd %p\n",
			    cmd);
		goto out;
	}

	/*
	 * Successfully aborted the command.
	 */
	cmd->result = (DID_ABORT << 16);
	cmd->scsi_done(cmd);

out:
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	return result;
}

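/*
 * Abort handshake: the ctx->abort_cmp set above is observed by
 * pvscsi_complete_request(), which swallows the normal completion and
 * signals the on-stack completion instead, so the command is only ever
 * finished once - here, with DID_ABORT.
 */
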
/*
 * Abort all outstanding requests. This is only safe to use if the completion
 * ring will never be walked again or the device has been reset, because it
 * destroys the 1-1 mapping between context field passed to emulation and our
 * request structure.
 */
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
	unsigned i;

	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
		struct scsi_cmnd *cmd = ctx->cmd;
		if (cmd) {
			scmd_printk(KERN_ERR, cmd,
				    "Forced reset on cmd %p\n", cmd);
			pvscsi_unmap_buffers(adapter, ctx);
			pvscsi_release_context(adapter, ctx);
			cmd->result = (DID_RESET << 16);
			cmd->scsi_done(cmd);
		}
	}
}

static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;
	bool use_msg;

	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

	spin_lock_irqsave(&adapter->hw_lock, flags);

	use_msg = adapter->use_msg;

	if (use_msg) {
		adapter->use_msg = 0;
		spin_unlock_irqrestore(&adapter->hw_lock, flags);

		/*
		 * Now that we know that the ISR won't add more work on the
		 * workqueue we can safely flush any outstanding work.
		 */
		flush_workqueue(adapter->workqueue);
		spin_lock_irqsave(&adapter->hw_lock, flags);
	}

	/*
	 * We're going to tear down the entire ring structure and set it back
	 * up, so stall new requests until all completions are flushed and
	 * the rings are back in place.
	 */

	pvscsi_process_request_ring(adapter);

	ll_adapter_reset(adapter);

	/*
	 * Now process any completions. Note we do this AFTER adapter reset,
	 * which is strange, but stops races where completions get posted
	 * between processing the ring and issuing the reset. The backend will
	 * not touch the ring memory after reset, so the immediately pre-reset
	 * completion ring state is still valid.
	 */
	pvscsi_process_completion_ring(adapter);

	pvscsi_reset_all(adapter);

	adapter->use_msg = use_msg;

	pvscsi_setup_all_rings(adapter);

	pvscsi_unmask_intr(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");

	/*
	 * We don't want to queue new requests for this bus after
	 * flushing all pending requests to emulation, since new
	 * requests could then sneak in during this bus reset phase,
	 * so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_bus_reset(adapter);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
		    host->host_no, cmd->device->id);

	/*
	 * We don't want to queue new requests for this device after flushing
	 * all pending requests to emulation, since new requests could then
	 * sneak in during this device reset phase, so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_device_reset(adapter, cmd->device->id);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static struct scsi_host_template pvscsi_template;

static const char *pvscsi_info(struct Scsi_Host *host)
{
	struct pvscsi_adapter *adapter = shost_priv(host);
	static char buf[256];

	sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
		"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
		adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
		pvscsi_template.cmd_per_lun);

	return buf;
}

static struct scsi_host_template pvscsi_template = {
	.module				= THIS_MODULE,
	.name				= "VMware PVSCSI Host Adapter",
	.proc_name			= "vmw_pvscsi",
	.info				= pvscsi_info,
	.queuecommand			= pvscsi_queue,
	.this_id			= -1,
	.sg_tablesize			= PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
	.dma_boundary			= UINT_MAX,
	.max_sectors			= 0xffff,
	.use_clustering			= ENABLE_CLUSTERING,
	.change_queue_depth		= pvscsi_change_queue_depth,
	.eh_abort_handler		= pvscsi_abort,
	.eh_device_reset_handler	= pvscsi_device_reset,
	.eh_bus_reset_handler		= pvscsi_bus_reset,
	.eh_host_reset_handler		= pvscsi_host_reset,
};

static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
			       const struct PVSCSIRingMsgDesc *e)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct Scsi_Host *host = adapter->host;
	struct scsi_device *sdev;

	printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n",
	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);

	if (e->type == PVSCSI_MSG_DEV_ADDED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			printk(KERN_INFO "vmw_pvscsi: device already exists\n");
			scsi_device_put(sdev);
		} else
			scsi_add_device(adapter->host, desc->bus,
					desc->target, desc->lun[1]);

		scsi_host_put(host);
	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else
			printk(KERN_INFO
			       "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
			       desc->bus, desc->target, desc->lun[1]);

		scsi_host_put(host);
	}
}

static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;

	return s->msgProdIdx != s->msgConsIdx;
}

static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
	u32 msg_entries = s->msgNumEntriesLog2;

	while (pvscsi_msg_pending(adapter)) {
		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
						      MASK(msg_entries));

		barrier();
		pvscsi_process_msg(adapter, e);
		barrier();
		s->msgConsIdx++;
	}
}

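/*
 * The barrier() pairs here follow the same compiler-barrier discipline
 * described ahead of pvscsi_process_completion_ring(): read the slot only
 * after seeing the producer index advance, and bump msgConsIdx only after
 * the slot has actually been consumed.
 */
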
static void pvscsi_msg_workqueue_handler(struct work_struct *data)
{
	struct pvscsi_adapter *adapter;

	adapter = container_of(data, struct pvscsi_adapter, work);

	pvscsi_process_msg_ring(adapter);
}

static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
	char name[32];

	if (!pvscsi_use_msg)
		return 0;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_MSG_RING);

	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
		return 0;

	snprintf(name, sizeof(name),
		 "vmw_pvscsi_wq_%u", adapter->host->host_no);

	adapter->workqueue = create_singlethread_workqueue(name);
	if (!adapter->workqueue) {
		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
		return 0;
	}
	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

	return 1;
}

static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
				       bool enable)
{
	u32 val;

	if (!pvscsi_use_req_threshold)
		return false;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
	val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
	if (val == -1) {
		printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
		return false;
	} else {
		struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
		cmd_msg.enable = enable;
		printk(KERN_INFO
		       "vmw_pvscsi: %sabling reqCallThreshold\n",
		       enable ? "en" : "dis");
		pvscsi_write_cmd_desc(adapter,
				      PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
				      &cmd_msg, sizeof(cmd_msg));
		return pvscsi_reg_read(adapter,
				       PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
	}
}

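/*
 * A COMMAND_STATUS readback of -1 is the device's "command not supported"
 * sentinel; pvscsi_setup_msg_workqueue() above uses the same
 * probe-by-issuing trick for PVSCSI_CMD_SETUP_MSG_RING.
 */
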
static irqreturn_t pvscsi_isr(int irq, void *devp)
{
	struct pvscsi_adapter *adapter = devp;
	int handled;

	if (adapter->use_msi || adapter->use_msix)
		handled = true;
	else {
		u32 val = pvscsi_read_intr_status(adapter);
		handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
		if (handled)
			pvscsi_write_intr_status(devp, val);
	}

	if (handled) {
		unsigned long flags;

		spin_lock_irqsave(&adapter->hw_lock, flags);

		pvscsi_process_completion_ring(adapter);
		if (adapter->use_msg && pvscsi_msg_pending(adapter))
			queue_work(adapter->workqueue, &adapter->work);

		spin_unlock_irqrestore(&adapter->hw_lock, flags);
	}

	return IRQ_RETVAL(handled);
}

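/*
 * With MSI/MSI-X the interrupt is exclusively ours, so no status check or
 * ack is needed; for (possibly shared) INTx the ISR must read INTR_STATUS
 * to decide whether to claim the interrupt, and acks only what it handled.
 */
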
static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
{
	struct pvscsi_ctx *ctx = adapter->cmd_map;
	unsigned i;

	for (i = 0; i < adapter->req_depth; ++i, ++ctx)
		free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}

static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
			     unsigned int *irq)
{
	struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
	int ret;

	ret = pci_enable_msix_exact(adapter->dev, &entry, 1);
	if (ret)
		return ret;

	*irq = entry.vector;

	return 0;
}

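/*
 * Only a single MSI-X vector is requested: PVSCSI_VECTOR_COMPLETION carries
 * both completion and (when enabled) message-ring interrupts, which is
 * presumably why the msg work is queued from the same ISR above.
 */
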
static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
	if (adapter->irq) {
		free_irq(adapter->irq, adapter);
		adapter->irq = 0;
	}
	if (adapter->use_msi) {
		pci_disable_msi(adapter->dev);
		adapter->use_msi = 0;
	} else if (adapter->use_msix) {
		pci_disable_msix(adapter->dev);
		adapter->use_msix = 0;
	}
}

static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
	if (adapter->workqueue)
		destroy_workqueue(adapter->workqueue);

	if (adapter->mmioBase)
		pci_iounmap(adapter->dev, adapter->mmioBase);

	pci_release_regions(adapter->dev);

	if (adapter->cmd_map) {
		pvscsi_free_sgls(adapter);
		kfree(adapter->cmd_map);
	}

	if (adapter->rings_state)
		pci_free_consistent(adapter->dev, PAGE_SIZE,
				    adapter->rings_state, adapter->ringStatePA);

	if (adapter->req_ring)
		pci_free_consistent(adapter->dev,
				    adapter->req_pages * PAGE_SIZE,
				    adapter->req_ring, adapter->reqRingPA);

	if (adapter->cmp_ring)
		pci_free_consistent(adapter->dev,
				    adapter->cmp_pages * PAGE_SIZE,
				    adapter->cmp_ring, adapter->cmpRingPA);

	if (adapter->msg_ring)
		pci_free_consistent(adapter->dev,
				    adapter->msg_pages * PAGE_SIZE,
				    adapter->msg_ring, adapter->msgRingPA);
}

/*
 * Allocate scatter gather lists.
 *
 * These are statically allocated. Trying to be clever was not worth it.
 *
 * Dynamic allocation can fail, and we can't go deep into the memory
 * allocator, since we're a SCSI driver, and trying too hard to allocate
 * memory might generate disk I/O. We also don't want to fail disk I/O
 * in that case because we can't get an allocation - the I/O could be
 * trying to swap out data to free memory. Since that is pathological,
 * just use a statically allocated scatter list.
 */
static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
	struct pvscsi_ctx *ctx;
	int i;

	ctx = adapter->cmd_map;
	BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);

	for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
		ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
						    get_order(SGL_SIZE));
		ctx->sglPA = 0;
		BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
		if (!ctx->sgl) {
			for (; i >= 0; --i, --ctx) {
				free_pages((unsigned long)ctx->sgl,
					   get_order(SGL_SIZE));
				ctx->sgl = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Query the device, fetch the config info and return the
 * maximum number of targets on the adapter. In case of
 * failure for any reason, return the default, i.e. 16.
 */
static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
{
	struct PVSCSICmdDescConfigCmd cmd;
	struct PVSCSIConfigPageHeader *header;
	struct device *dev;
	dma_addr_t configPagePA;
	void *config_page;
	u32 numPhys = 16;

	dev = pvscsi_dev(adapter);
	config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
					   &configPagePA);
	if (!config_page) {
		dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
		goto exit;
	}
	BUG_ON(configPagePA & ~PAGE_MASK);

	/* Fetch config info from the device. */
	cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
	cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
	cmd.cmpAddr = configPagePA;
	cmd._pad = 0;

	/*
	 * Mark the completion page header with error values. If the device
	 * completes the command successfully, it sets the status values to
	 * indicate success.
	 */
	header = config_page;
	memset(header, 0, sizeof *header);
	header->hostStatus = BTSTAT_INVPARAM;
	header->scsiStatus = SDSTAT_CHECK;

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);

	if (header->hostStatus == BTSTAT_SUCCESS &&
	    header->scsiStatus == SDSTAT_GOOD) {
		struct PVSCSIConfigPageController *config;

		config = config_page;
		numPhys = config->numPhys;
	} else
		dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
			 header->hostStatus, header->scsiStatus);

	pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);

exit:
	return numPhys;
}

static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pvscsi_adapter *adapter;
	struct pvscsi_adapter adapter_temp;
	struct Scsi_Host *host = NULL;
	unsigned int i;
	unsigned long flags = 0;
	int error;
	u32 max_id;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	/*
	 * Let's use a temp pvscsi_adapter struct until we find the number of
	 * targets on the adapter, after that we will switch to the real
	 * allocated struct.
	 */
	adapter = &adapter_temp;
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_disable_device;
	}

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources_and_disable;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources_and_disable;
	}

	pci_set_master(pdev);

	/*
	 * Ask the device for max number of targets before deciding the
	 * default pvscsi_ring_pages value.
	 */
	max_id = pvscsi_get_max_targets(adapter);
	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);

	if (pvscsi_ring_pages == 0)
		/*
		 * Set the right default value. For up to 16 targets it is 8;
		 * above that, the maximum.
		 */
		pvscsi_ring_pages = (max_id > 16) ?
			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
	printk(KERN_INFO
	       "vmw_pvscsi: setting ring_pages to %d\n",
	       pvscsi_ring_pages);

	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_release_resources_and_disable;
	}

	/*
	 * Let's use the real pvscsi_adapter struct here onwards.
	 */
	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->host = host;
	/*
	 * Copy back what we already have to the allocated adapter struct.
	 */
	adapter->rev = adapter_temp.rev;
	adapter->mmioBase = adapter_temp.mmioBase;

	spin_lock_init(&adapter->hw_lock);
	host->max_channel = 0;
	host->max_lun     = 1;
	host->max_cmd_len = 16;
	host->max_id      = max_id;

	pci_set_drvdata(pdev, host);

	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

	if (!pvscsi_disable_msix &&
	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
		adapter->use_msix = 1;
	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
		adapter->use_msi = 1;
		adapter->irq = pdev->irq;
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		adapter->irq = pdev->irq;
		flags = IRQF_SHARED;
	}

	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
	       adapter->use_req_threshold ? "en" : "dis");

	error = request_irq(adapter->irq, pvscsi_isr, flags,
			    "vmw_pvscsi", adapter);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		adapter->irq = 0;
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_shutdown_intr(adapter);
	pvscsi_release_resources(adapter);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return error;

out_release_resources_and_disable:
	pvscsi_shutdown_intr(adapter);
	pvscsi_release_resources(adapter);
	goto out_disable_device;
}

static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
{
	pvscsi_mask_intr(adapter);

	if (adapter->workqueue)
		flush_workqueue(adapter->workqueue);

	pvscsi_shutdown_intr(adapter);

	pvscsi_process_request_ring(adapter);
	pvscsi_process_completion_ring(adapter);
	ll_adapter_reset(adapter);
}

static void pvscsi_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *host = pci_get_drvdata(dev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	__pvscsi_shutdown(adapter);
}

static void pvscsi_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	scsi_remove_host(host);

	__pvscsi_shutdown(adapter);
	pvscsi_release_resources(adapter);

	scsi_host_put(host);

	pci_disable_device(pdev);
}

static struct pci_driver pvscsi_pci_driver = {
	.name		= "vmw_pvscsi",
	.id_table	= pvscsi_pci_tbl,
	.probe		= pvscsi_probe,
	.remove		= pvscsi_remove,
	.shutdown	= pvscsi_shutdown,
};

static int __init pvscsi_init(void)
{
	pr_info("%s - version %s\n",
		PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
	return pci_register_driver(&pvscsi_pci_driver);
}

static void __exit pvscsi_exit(void)
{
	pci_unregister_driver(&pvscsi_pci_driver);
}

module_init(pvscsi_init);
module_exit(pvscsi_exit);