2 * Linux driver for VMware's para-virtualized SCSI HBA.
4 * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 * Maintained by: Jim Gill <jgill@vmware.com>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/interrupt.h>
27 #include <linux/slab.h>
28 #include <linux/workqueue.h>
29 #include <linux/pci.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_tcq.h>
37 #include "vmw_pvscsi.h"
39 #define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
41 MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
42 MODULE_AUTHOR("VMware, Inc.");
43 MODULE_LICENSE("GPL");
44 MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
46 #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
47 #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
48 #define PVSCSI_DEFAULT_QUEUE_DEPTH 254
49 #define SGL_SIZE PAGE_SIZE
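/*
 * Each command context owns one page-sized scatter/gather list, so a
 * struct pvscsi_sg_list must fit in a single page; pvscsi_allocate_sg()
 * enforces this with a BUILD_BUG_ON.
 */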
51 struct pvscsi_sg_list {
52 struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
57 * The index of the context in cmd_map serves as the context ID for a
58 * 1-to-1 mapping of completions back to requests.
60 struct scsi_cmnd *cmd;
61 struct pvscsi_sg_list *sgl;
62 struct list_head list;
66 struct completion *abort_cmp;
69 struct pvscsi_adapter {
73 bool use_req_threshold;
77 struct workqueue_struct *workqueue;
78 struct work_struct work;
80 struct PVSCSIRingReqDesc *req_ring;
85 struct PVSCSIRingCmpDesc *cmp_ring;
89 struct PVSCSIRingMsgDesc *msg_ring;
93 struct PVSCSIRingsState *rings_state;
94 dma_addr_t ringStatePA;
97 struct Scsi_Host *host;
99 struct list_head cmd_pool;
100 struct pvscsi_ctx *cmd_map;
104 /* Command line parameters */
105 static int pvscsi_ring_pages;
106 static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
107 static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
108 static bool pvscsi_disable_msi;
109 static bool pvscsi_disable_msix;
110 static bool pvscsi_use_msg = true;
111 static bool pvscsi_use_req_threshold = true;
113 #define PVSCSI_RW (S_IRUSR | S_IWUSR)
115 module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
116 MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
117 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
118 "[up to 16 targets],"
119 __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
120 "[for 16+ targets])");
122 module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
123 MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
124 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
126 module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
127 MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
128 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
130 module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
131 MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
133 module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
134 MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
136 module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
137 MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
139 module_param_named(use_req_threshold, pvscsi_use_req_threshold,
141 MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
143 static const struct pci_device_id pvscsi_pci_tbl[] = {
144 { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
148 MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
150 static struct device *
151 pvscsi_dev(const struct pvscsi_adapter *adapter)
153 return &(adapter->dev->dev);
156 static struct pvscsi_ctx *
157 pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
159 struct pvscsi_ctx *ctx, *end;
161 end = &adapter->cmd_map[adapter->req_depth];
162 for (ctx = adapter->cmd_map; ctx < end; ctx++)
169 static struct pvscsi_ctx *
170 pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
172 struct pvscsi_ctx *ctx;
174 if (list_empty(&adapter->cmd_pool))
177 ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
179 list_del(&ctx->list);
184 static void pvscsi_release_context(struct pvscsi_adapter *adapter,
185 struct pvscsi_ctx *ctx)
188 ctx->abort_cmp = NULL;
189 list_add(&ctx->list, &adapter->cmd_pool);
193 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
194 * non-zero integer. ctx always points to an entry in the cmd_map array, hence
195 * the return value is always >= 1.
197 static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
198 const struct pvscsi_ctx *ctx)
200 return ctx - adapter->cmd_map + 1;
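/*
 * pvscsi_get_context() below is the inverse mapping: context ID 1 refers
 * back to &adapter->cmd_map[0], ID 2 to &adapter->cmd_map[1], and so on.
 * Keeping the IDs 1-based means 0 never names a valid context.
 */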
203 static struct pvscsi_ctx *
204 pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
206 return &adapter->cmd_map[context - 1];
209 static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
212 writel(val, adapter->mmioBase + offset);
215 static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
217 return readl(adapter->mmioBase + offset);
220 static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
222 return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
225 static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
228 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
231 static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
235 intr_bits = PVSCSI_INTR_CMPL_MASK;
236 if (adapter->use_msg)
237 intr_bits |= PVSCSI_INTR_MSG_MASK;
239 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
242 static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
244 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
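/*
 * Commands that carry a descriptor are issued by writing the command code
 * to the COMMAND register and then streaming the descriptor into
 * COMMAND_DATA as a sequence of 32-bit words.
 */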
247 static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
248 u32 cmd, const void *desc, size_t len)
250 const u32 *ptr = desc;
254 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
255 for (i = 0; i < len; i++)
256 pvscsi_reg_write(adapter,
257 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
260 static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
261 const struct pvscsi_ctx *ctx)
263 struct PVSCSICmdDescAbortCmd cmd = { 0 };
265 cmd.target = ctx->cmd->device->id;
266 cmd.context = pvscsi_map_context(adapter, ctx);
268 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
271 static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
273 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
276 static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
278 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
281 static int scsi_is_rw(unsigned char op)
283 return op == READ_6 || op == WRITE_6 ||
284 op == READ_10 || op == WRITE_10 ||
285 op == READ_12 || op == WRITE_12 ||
286 op == READ_16 || op == WRITE_16;
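/*
 * Kick policy: READ/WRITE commands may skip the doorbell while the device
 * still has at least reqCallThreshold unconsumed requests on the ring
 * (driver-based request coalescing); every other command kicks the device
 * immediately via the non-RW doorbell. The threshold itself is negotiated
 * in pvscsi_setup_req_threshold().
 */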
289 static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
292 if (scsi_is_rw(op)) {
293 struct PVSCSIRingsState *s = adapter->rings_state;
295 if (!adapter->use_req_threshold ||
296 s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
297 pvscsi_kick_rw_io(adapter);
299 pvscsi_process_request_ring(adapter);
303 static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
305 dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
307 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
310 static void ll_bus_reset(const struct pvscsi_adapter *adapter)
312 dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);
314 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
317 static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
319 struct PVSCSICmdDescResetDevice cmd = { 0 };
321 dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);
325 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
329 static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
330 struct scatterlist *sg, unsigned count)
333 struct PVSCSISGElement *sge;
335 BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
337 sge = &ctx->sgl->sge[0];
338 for (i = 0; i < count; i++, sg++) {
339 sge[i].addr = sg_dma_address(sg);
340 sge[i].length = sg_dma_len(sg);
346 * Map all data buffers for a command into PCI space and
347 * set up the scatter/gather list if needed.
349 static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
350 struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
351 struct PVSCSIRingReqDesc *e)
354 unsigned bufflen = scsi_bufflen(cmd);
355 struct scatterlist *sg;
357 e->dataLen = bufflen;
362 sg = scsi_sglist(cmd);
363 count = scsi_sg_count(cmd);
365 int segs = scsi_dma_map(cmd);
367 if (segs == -ENOMEM) {
368 scmd_printk(KERN_ERR, cmd,
369 "vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
371 } else if (segs > 1) {
372 pvscsi_create_sg(ctx, sg, segs);
374 e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
375 ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
376 SGL_SIZE, PCI_DMA_TODEVICE);
377 if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
378 scmd_printk(KERN_ERR, cmd,
379 "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
384 e->dataAddr = ctx->sglPA;
386 e->dataAddr = sg_dma_address(sg);
389 * In case there is no S/G list, scsi_sglist points
390 * directly to the buffer.
392 ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
393 cmd->sc_data_direction);
394 if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
395 scmd_printk(KERN_ERR, cmd,
396 "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
399 e->dataAddr = ctx->dataPA;
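/*
 * Summary of the mapping cases above: multi-element transfers go through
 * the per-context SG page (which itself must be mapped for the device),
 * a single-element transfer is pointed at directly, and a request without
 * an SG list maps the flat buffer with pci_map_single().
 */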
405 static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
406 struct pvscsi_ctx *ctx)
408 struct scsi_cmnd *cmd;
412 bufflen = scsi_bufflen(cmd);
415 unsigned count = scsi_sg_count(cmd);
420 pci_unmap_single(adapter->dev, ctx->sglPA,
421 SGL_SIZE, PCI_DMA_TODEVICE);
425 pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
426 cmd->sc_data_direction);
428 if (cmd->sense_buffer)
429 pci_unmap_single(adapter->dev, ctx->sensePA,
430 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
433 static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
435 adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
436 &adapter->ringStatePA);
437 if (!adapter->rings_state)
440 adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
442 adapter->req_depth = adapter->req_pages
443 * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
444 adapter->req_ring = pci_alloc_consistent(adapter->dev,
445 adapter->req_pages * PAGE_SIZE,
446 &adapter->reqRingPA);
447 if (!adapter->req_ring)
450 adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
452 adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
453 adapter->cmp_pages * PAGE_SIZE,
454 &adapter->cmpRingPA);
455 if (!adapter->cmp_ring)
458 BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
459 BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
460 BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
462 if (!adapter->use_msg)
465 adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
466 pvscsi_msg_ring_pages);
467 adapter->msg_ring = pci_alloc_consistent(adapter->dev,
468 adapter->msg_pages * PAGE_SIZE,
469 &adapter->msgRingPA);
470 if (!adapter->msg_ring)
472 BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
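/*
 * The rings-state page and all rings are allocated with
 * pci_alloc_consistent(), which is expected to return page-aligned memory
 * for these page-multiple sizes; the BUG_ONs above guard that assumption,
 * since the device is handed page numbers (PPNs) in
 * pvscsi_setup_all_rings() below.
 */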
477 static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
479 struct PVSCSICmdDescSetupRings cmd = { 0 };
483 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
484 cmd.reqRingNumPages = adapter->req_pages;
485 cmd.cmpRingNumPages = adapter->cmp_pages;
487 base = adapter->reqRingPA;
488 for (i = 0; i < adapter->req_pages; i++) {
489 cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
493 base = adapter->cmpRingPA;
494 for (i = 0; i < adapter->cmp_pages; i++) {
495 cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
499 memset(adapter->rings_state, 0, PAGE_SIZE);
500 memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
501 memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
503 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
506 if (adapter->use_msg) {
507 struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
509 cmd_msg.numPages = adapter->msg_pages;
511 base = adapter->msgRingPA;
512 for (i = 0; i < adapter->msg_pages; i++) {
513 cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
516 memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
518 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
519 &cmd_msg, sizeof(cmd_msg));
523 static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
525 if (!sdev->tagged_supported)
527 return scsi_change_queue_depth(sdev, qdepth);
531 * Pull a completion descriptor off and pass the completion back
532 * to the SCSI mid layer.
534 static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
535 const struct PVSCSIRingCmpDesc *e)
537 struct pvscsi_ctx *ctx;
538 struct scsi_cmnd *cmd;
539 struct completion *abort_cmp;
540 u32 btstat = e->hostStatus;
541 u32 sdstat = e->scsiStatus;
543 ctx = pvscsi_get_context(adapter, e->context);
545 abort_cmp = ctx->abort_cmp;
546 pvscsi_unmap_buffers(adapter, ctx);
547 pvscsi_release_context(adapter, ctx);
550 * The command was requested to be aborted. Just signal that
551 * the request completed and swallow the actual cmd completion
552 * here. The abort handler will post a completion for this
553 * command indicating that it got successfully aborted.
560 if (sdstat != SAM_STAT_GOOD &&
561 (btstat == BTSTAT_SUCCESS ||
562 btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
563 btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
564 if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
565 cmd->result = (DID_RESET << 16);
567 cmd->result = (DID_OK << 16) | sdstat;
568 if (sdstat == SAM_STAT_CHECK_CONDITION &&
570 cmd->result |= (DRIVER_SENSE << 24);
575 case BTSTAT_LINKED_COMMAND_COMPLETED:
576 case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
578 * Commands like INQUIRY may transfer less data than
579 * requested by the initiator via bufflen. Set residual
580 * count to make the upper layer aware of the actual amount of data transferred.
583 scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
584 cmd->result = (DID_OK << 16);
588 case BTSTAT_DATA_UNDERRUN:
589 /* Report residual data in underruns */
590 scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
591 cmd->result = (DID_ERROR << 16);
594 case BTSTAT_SELTIMEO:
595 /* Our emulation returns this for non-connected devs */
596 cmd->result = (DID_BAD_TARGET << 16);
599 case BTSTAT_LUNMISMATCH:
600 case BTSTAT_TAGREJECT:
602 cmd->result = (DRIVER_INVALID << 24);
605 case BTSTAT_HAHARDWARE:
606 case BTSTAT_INVPHASE:
607 case BTSTAT_HATIMEOUT:
608 case BTSTAT_NORESPONSE:
609 case BTSTAT_DISCONNECT:
610 case BTSTAT_HASOFTWARE:
612 case BTSTAT_SENSFAILED:
613 cmd->result |= (DID_ERROR << 16);
618 case BTSTAT_BUSRESET:
619 cmd->result = (DID_RESET << 16);
622 case BTSTAT_ABORTQUEUE:
623 cmd->result = (DID_BUS_BUSY << 16);
626 case BTSTAT_SCSIPARITY:
627 cmd->result = (DID_PARITY << 16);
631 cmd->result = (DID_ERROR << 16);
632 scmd_printk(KERN_DEBUG, cmd,
633 "Unknown completion status: 0x%x\n",
637 dev_dbg(&cmd->device->sdev_gendev,
638 "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
639 cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
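/*
 * The assignments above follow the SCSI midlayer result layout:
 * driver byte << 24, host byte << 16, and the SCSI status byte in the
 * low byte (e.g. DRIVER_SENSE << 24 | DID_OK << 16 | sdstat).
 */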
645 * Barrier usage: since the PVSCSI device is emulated, there could be cases
646 * where we may want to serialize some accesses between the driver and the
647 * emulation layer. We use compiler barriers instead of the more expensive
648 * memory barriers because PVSCSI is only supported on x86, which has strong
649 * memory access ordering.
651 static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
653 struct PVSCSIRingsState *s = adapter->rings_state;
654 struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
655 u32 cmp_entries = s->cmpNumEntriesLog2;
657 while (s->cmpConsIdx != s->cmpProdIdx) {
658 struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
661 * This barrier() ensures that *e is not dereferenced while
662 * the device emulation still writes data into the slot.
663 * Since the device emulation advances s->cmpProdIdx only after
664 * updating the slot we want to check it first.
667 pvscsi_complete_request(adapter, e);
669 * This barrier() ensures that compiler doesn't reorder write
670 * to s->cmpConsIdx before the read of (*e) inside
671 * pvscsi_complete_request. Otherwise, device emulation may
672 * overwrite *e before we had a chance to read it.
680 * Translate a Linux SCSI request into a request ring entry.
682 static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
683 struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
685 struct PVSCSIRingsState *s;
686 struct PVSCSIRingReqDesc *e;
687 struct scsi_device *sdev;
690 s = adapter->rings_state;
692 req_entries = s->reqNumEntriesLog2;
695 * If this condition holds, we might have room on the request ring, but
696 * we might not have room on the completion ring for the response.
697 * However, we have already ruled out this possibility - we would not
698 * have successfully allocated a context if it were true, since we only
699 * have one context per request entry. Check for it anyway, since it
700 * would be a serious bug.
702 if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
703 scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
704 "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
705 s->reqProdIdx, s->cmpConsIdx);
709 e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
711 e->bus = sdev->channel;
712 e->target = sdev->id;
713 memset(e->lun, 0, sizeof(e->lun));
714 e->lun[1] = sdev->lun;
716 if (cmd->sense_buffer) {
717 ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
718 SCSI_SENSE_BUFFERSIZE,
720 if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
721 scmd_printk(KERN_ERR, cmd,
722 "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
726 e->senseAddr = ctx->sensePA;
727 e->senseLen = SCSI_SENSE_BUFFERSIZE;
732 e->cdbLen = cmd->cmd_len;
733 e->vcpuHint = smp_processor_id();
734 memcpy(e->cdb, cmd->cmnd, e->cdbLen);
736 e->tag = SIMPLE_QUEUE_TAG;
738 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
739 e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
740 else if (cmd->sc_data_direction == DMA_TO_DEVICE)
741 e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
742 else if (cmd->sc_data_direction == DMA_NONE)
743 e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
747 if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
748 if (cmd->sense_buffer) {
749 pci_unmap_single(adapter->dev, ctx->sensePA,
750 SCSI_SENSE_BUFFERSIZE,
757 e->context = pvscsi_map_context(adapter, ctx);
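/*
 * The context ID stored in e->context travels to the device with the
 * request and is echoed back in the completion descriptor, which is how
 * pvscsi_complete_request() finds the matching scsi_cmnd.
 */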
766 static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
768 struct Scsi_Host *host = cmd->device->host;
769 struct pvscsi_adapter *adapter = shost_priv(host);
770 struct pvscsi_ctx *ctx;
774 spin_lock_irqsave(&adapter->hw_lock, flags);
776 ctx = pvscsi_acquire_context(adapter, cmd);
777 if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
779 pvscsi_release_context(adapter, ctx);
780 spin_unlock_irqrestore(&adapter->hw_lock, flags);
781 return SCSI_MLQUEUE_HOST_BUSY;
784 cmd->scsi_done = done;
787 dev_dbg(&cmd->device->sdev_gendev,
788 "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
790 spin_unlock_irqrestore(&adapter->hw_lock, flags);
792 pvscsi_kick_io(adapter, op);
797 static DEF_SCSI_QCMD(pvscsi_queue)
799 static int pvscsi_abort(struct scsi_cmnd *cmd)
801 struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
802 struct pvscsi_ctx *ctx;
804 int result = SUCCESS;
805 DECLARE_COMPLETION_ONSTACK(abort_cmp);
808 scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
809 adapter->host->host_no, cmd);
811 spin_lock_irqsave(&adapter->hw_lock, flags);
814 * Poll the completion ring first - we might be trying to abort
815 * a command whose completion is already waiting in the completion ring.
817 pvscsi_process_completion_ring(adapter);
820 * If there is no context for the command, it either already succeeded
821 * or else was never properly issued. Not our problem.
823 ctx = pvscsi_find_context(adapter, cmd);
825 scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
830 * Mark that the command has been requested to be aborted and issue
833 ctx->abort_cmp = &abort_cmp;
835 pvscsi_abort_cmd(adapter, ctx);
836 spin_unlock_irqrestore(&adapter->hw_lock, flags);
837 /* Wait for 2 secs for the completion. */
838 done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
839 spin_lock_irqsave(&adapter->hw_lock, flags);
843 * Failed to abort the command, unmark the fact that it
844 * was requested to be aborted.
846 ctx->abort_cmp = NULL;
848 scmd_printk(KERN_DEBUG, cmd,
849 "Failed to get completion for aborted cmd %p\n",
855 * Successfully aborted the command.
857 cmd->result = (DID_ABORT << 16);
861 spin_unlock_irqrestore(&adapter->hw_lock, flags);
866 * Abort all outstanding requests. This is only safe to use if the completion
867 * ring will never be walked again or the device has been reset, because it
868 * destroys the 1-1 mapping between the context field passed to emulation and our request structures.
871 static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
875 for (i = 0; i < adapter->req_depth; i++) {
876 struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
877 struct scsi_cmnd *cmd = ctx->cmd;
879 scmd_printk(KERN_ERR, cmd,
880 "Forced reset on cmd %p\n", cmd);
881 pvscsi_unmap_buffers(adapter, ctx);
882 pvscsi_release_context(adapter, ctx);
883 cmd->result = (DID_RESET << 16);
889 static int pvscsi_host_reset(struct scsi_cmnd *cmd)
891 struct Scsi_Host *host = cmd->device->host;
892 struct pvscsi_adapter *adapter = shost_priv(host);
896 scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
898 spin_lock_irqsave(&adapter->hw_lock, flags);
900 use_msg = adapter->use_msg;
903 adapter->use_msg = 0;
904 spin_unlock_irqrestore(&adapter->hw_lock, flags);
907 * Now that we know that the ISR won't add more work to the
908 * workqueue, we can safely flush any outstanding work.
910 flush_workqueue(adapter->workqueue);
911 spin_lock_irqsave(&adapter->hw_lock, flags);
915 * We're going to tear down the entire ring structure and set it back
916 * up, so stall new requests until all completions are flushed and
917 * the rings are back in place.
920 pvscsi_process_request_ring(adapter);
922 ll_adapter_reset(adapter);
925 * Now process any completions. Note we do this AFTER adapter reset,
926 * which is strange, but stops races where completions get posted
927 * between processing the ring and issuing the reset. The backend will
928 * not touch the ring memory after reset, so the immediately pre-reset
929 * completion ring state is still valid.
931 pvscsi_process_completion_ring(adapter);
933 pvscsi_reset_all(adapter);
934 adapter->use_msg = use_msg;
935 pvscsi_setup_all_rings(adapter);
936 pvscsi_unmask_intr(adapter);
938 spin_unlock_irqrestore(&adapter->hw_lock, flags);
943 static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
945 struct Scsi_Host *host = cmd->device->host;
946 struct pvscsi_adapter *adapter = shost_priv(host);
949 scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
952 * We don't want to queue new requests for this bus after
953 * flushing all pending requests to emulation, since new
954 * requests could then sneak in during this bus reset phase,
955 * so take the lock now.
957 spin_lock_irqsave(&adapter->hw_lock, flags);
959 pvscsi_process_request_ring(adapter);
960 ll_bus_reset(adapter);
961 pvscsi_process_completion_ring(adapter);
963 spin_unlock_irqrestore(&adapter->hw_lock, flags);
968 static int pvscsi_device_reset(struct scsi_cmnd *cmd)
970 struct Scsi_Host *host = cmd->device->host;
971 struct pvscsi_adapter *adapter = shost_priv(host);
974 scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
975 host->host_no, cmd->device->id);
978 * We don't want to queue new requests for this device after flushing
979 * all pending requests to emulation, since new requests could then
980 * sneak in during this device reset phase, so take the lock now.
982 spin_lock_irqsave(&adapter->hw_lock, flags);
984 pvscsi_process_request_ring(adapter);
985 ll_device_reset(adapter, cmd->device->id);
986 pvscsi_process_completion_ring(adapter);
988 spin_unlock_irqrestore(&adapter->hw_lock, flags);
993 static struct scsi_host_template pvscsi_template;
995 static const char *pvscsi_info(struct Scsi_Host *host)
997 struct pvscsi_adapter *adapter = shost_priv(host);
998 static char buf[256];
1000 sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
1001 "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
1002 adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
1003 pvscsi_template.cmd_per_lun);
1008 static struct scsi_host_template pvscsi_template = {
1009 .module = THIS_MODULE,
1010 .name = "VMware PVSCSI Host Adapter",
1011 .proc_name = "vmw_pvscsi",
1012 .info = pvscsi_info,
1013 .queuecommand = pvscsi_queue,
1015 .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
1016 .dma_boundary = UINT_MAX,
1017 .max_sectors = 0xffff,
1018 .use_clustering = ENABLE_CLUSTERING,
1019 .change_queue_depth = pvscsi_change_queue_depth,
1020 .eh_abort_handler = pvscsi_abort,
1021 .eh_device_reset_handler = pvscsi_device_reset,
1022 .eh_bus_reset_handler = pvscsi_bus_reset,
1023 .eh_host_reset_handler = pvscsi_host_reset,
1026 static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
1027 const struct PVSCSIRingMsgDesc *e)
1029 struct PVSCSIRingsState *s = adapter->rings_state;
1030 struct Scsi_Host *host = adapter->host;
1031 struct scsi_device *sdev;
1033 printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n",
1034 e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
1036 BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
1038 if (e->type == PVSCSI_MSG_DEV_ADDED) {
1039 struct PVSCSIMsgDescDevStatusChanged *desc;
1040 desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
1043 "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
1044 desc->bus, desc->target, desc->lun[1]);
1046 if (!scsi_host_get(host))
1049 sdev = scsi_device_lookup(host, desc->bus, desc->target,
1052 printk(KERN_INFO "vmw_pvscsi: device already exists\n");
1053 scsi_device_put(sdev);
1055 scsi_add_device(adapter->host, desc->bus,
1056 desc->target, desc->lun[1]);
1058 scsi_host_put(host);
1059 } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
1060 struct PVSCSIMsgDescDevStatusChanged *desc;
1061 desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
1064 "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
1065 desc->bus, desc->target, desc->lun[1]);
1067 if (!scsi_host_get(host))
1070 sdev = scsi_device_lookup(host, desc->bus, desc->target,
1073 scsi_remove_device(sdev);
1074 scsi_device_put(sdev);
1077 "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
1078 desc->bus, desc->target, desc->lun[1]);
1080 scsi_host_put(host);
1084 static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
1086 struct PVSCSIRingsState *s = adapter->rings_state;
1088 return s->msgProdIdx != s->msgConsIdx;
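/*
 * Message-ring events are handled from a workqueue (see
 * pvscsi_msg_workqueue_handler()) rather than from the interrupt handler,
 * because pvscsi_process_msg() calls scsi_add_device()/scsi_remove_device(),
 * which may sleep.
 */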
1091 static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
1093 struct PVSCSIRingsState *s = adapter->rings_state;
1094 struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
1095 u32 msg_entries = s->msgNumEntriesLog2;
1097 while (pvscsi_msg_pending(adapter)) {
1098 struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
1102 pvscsi_process_msg(adapter, e);
1108 static void pvscsi_msg_workqueue_handler(struct work_struct *data)
1110 struct pvscsi_adapter *adapter;
1112 adapter = container_of(data, struct pvscsi_adapter, work);
1114 pvscsi_process_msg_ring(adapter);
1117 static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
1121 if (!pvscsi_use_msg)
1124 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
1125 PVSCSI_CMD_SETUP_MSG_RING);
1127 if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
1130 snprintf(name, sizeof(name),
1131 "vmw_pvscsi_wq_%u", adapter->host->host_no);
1133 adapter->workqueue = create_singlethread_workqueue(name);
1134 if (!adapter->workqueue) {
1135 printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
1138 INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
1143 static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
1148 if (!pvscsi_use_req_threshold)
1151 pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
1152 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
1153 val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
1155 printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
1158 struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
1159 cmd_msg.enable = enable;
1161 "vmw_pvscsi: %sabling reqCallThreshold\n",
1162 enable ? "en" : "dis");
1163 pvscsi_write_cmd_desc(adapter,
1164 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
1165 &cmd_msg, sizeof(cmd_msg));
1166 return pvscsi_reg_read(adapter,
1167 PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
1171 static irqreturn_t pvscsi_isr(int irq, void *devp)
1173 struct pvscsi_adapter *adapter = devp;
1174 unsigned long flags;
1176 spin_lock_irqsave(&adapter->hw_lock, flags);
1177 pvscsi_process_completion_ring(adapter);
1178 if (adapter->use_msg && pvscsi_msg_pending(adapter))
1179 queue_work(adapter->workqueue, &adapter->work);
1180 spin_unlock_irqrestore(&adapter->hw_lock, flags);
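/*
 * pvscsi_isr() is used as-is for MSI/MSI-X, where the vector is exclusive
 * to this adapter. The shared (INTx) handler below must first read and
 * acknowledge INTR_STATUS, and bail out early if none of the supported
 * interrupt bits are set, since the line may belong to another device.
 */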
1185 static irqreturn_t pvscsi_shared_isr(int irq, void *devp)
1187 struct pvscsi_adapter *adapter = devp;
1188 u32 val = pvscsi_read_intr_status(adapter);
1190 if (!(val & PVSCSI_INTR_ALL_SUPPORTED))
1192 pvscsi_write_intr_status(devp, val);
1193 return pvscsi_isr(irq, devp);
1196 static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
1198 struct pvscsi_ctx *ctx = adapter->cmd_map;
1201 for (i = 0; i < adapter->req_depth; ++i, ++ctx)
1202 free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
1205 static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
1207 free_irq(pci_irq_vector(adapter->dev, 0), adapter);
1208 pci_free_irq_vectors(adapter->dev);
1211 static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
1213 if (adapter->workqueue)
1214 destroy_workqueue(adapter->workqueue);
1216 if (adapter->mmioBase)
1217 pci_iounmap(adapter->dev, adapter->mmioBase);
1219 pci_release_regions(adapter->dev);
1221 if (adapter->cmd_map) {
1222 pvscsi_free_sgls(adapter);
1223 kfree(adapter->cmd_map);
1226 if (adapter->rings_state)
1227 pci_free_consistent(adapter->dev, PAGE_SIZE,
1228 adapter->rings_state, adapter->ringStatePA);
1230 if (adapter->req_ring)
1231 pci_free_consistent(adapter->dev,
1232 adapter->req_pages * PAGE_SIZE,
1233 adapter->req_ring, adapter->reqRingPA);
1235 if (adapter->cmp_ring)
1236 pci_free_consistent(adapter->dev,
1237 adapter->cmp_pages * PAGE_SIZE,
1238 adapter->cmp_ring, adapter->cmpRingPA);
1240 if (adapter->msg_ring)
1241 pci_free_consistent(adapter->dev,
1242 adapter->msg_pages * PAGE_SIZE,
1243 adapter->msg_ring, adapter->msgRingPA);
1247 * Allocate scatter gather lists.
1249 * These are statically allocated. Trying to be clever was not worth it.
1251 * Dynamic allocation can fail, and we can't go deep into the memory
1252 * allocator, since we're a SCSI driver, and trying too hard to allocate
1253 * memory might generate disk I/O. We also don't want to fail disk I/O
1254 * in that case because we can't get an allocation - the I/O could be
1255 * trying to swap out data to free memory. Since that is pathological,
1256 * just use a statically allocated scatter list.
1259 static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
1261 struct pvscsi_ctx *ctx;
1264 ctx = adapter->cmd_map;
1265 BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
1267 for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
1268 ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
1269 get_order(SGL_SIZE));
1271 BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
1273 for (; i >= 0; --i, --ctx) {
1274 free_pages((unsigned long)ctx->sgl,
1275 get_order(SGL_SIZE));
1286 * Query the device, fetch the config info and return the
1287 * maximum number of targets on the adapter. In case of
1289 * failure, whatever the reason, return the default of 16.
1290 static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
1292 struct PVSCSICmdDescConfigCmd cmd;
1293 struct PVSCSIConfigPageHeader *header;
1295 dma_addr_t configPagePA;
1299 dev = pvscsi_dev(adapter);
1300 config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
1303 dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
1306 BUG_ON(configPagePA & ~PAGE_MASK);
1308 /* Fetch config info from the device. */
1309 cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
1310 cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
1311 cmd.cmpAddr = configPagePA;
1315 * Mark the completion page header with error values. If the device
1316 * completes the command successfully, it sets the status values to indicate success.
1319 header = config_page;
1320 memset(header, 0, sizeof *header);
1321 header->hostStatus = BTSTAT_INVPARAM;
1322 header->scsiStatus = SDSTAT_CHECK;
1324 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
1326 if (header->hostStatus == BTSTAT_SUCCESS &&
1327 header->scsiStatus == SDSTAT_GOOD) {
1328 struct PVSCSIConfigPageController *config;
1330 config = config_page;
1331 numPhys = config->numPhys;
1333 dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
1334 header->hostStatus, header->scsiStatus);
1335 pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
1340 static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1342 unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY;
1343 struct pvscsi_adapter *adapter;
1344 struct pvscsi_adapter adapter_temp;
1345 struct Scsi_Host *host = NULL;
1352 if (pci_enable_device(pdev))
1355 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
1356 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
1357 printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
1358 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
1359 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
1360 printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
1362 printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
1363 goto out_disable_device;
1367 * Let's use a temp pvscsi_adapter struct until we find the number of
1368 * targets on the adapter; after that we will switch to the real allocated struct.
1371 adapter = &adapter_temp;
1372 memset(adapter, 0, sizeof(*adapter));
1373 adapter->dev = pdev;
1374 adapter->rev = pdev->revision;
1376 if (pci_request_regions(pdev, "vmw_pvscsi")) {
1377 printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
1378 goto out_disable_device;
1381 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1382 if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
1385 if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
1391 if (i == DEVICE_COUNT_RESOURCE) {
1393 "vmw_pvscsi: adapter has no suitable MMIO region\n");
1394 goto out_release_resources_and_disable;
1397 adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
1399 if (!adapter->mmioBase) {
1401 "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
1402 i, PVSCSI_MEM_SPACE_SIZE);
1403 goto out_release_resources_and_disable;
1406 pci_set_master(pdev);
1409 * Ask the device for the maximum number of targets before deciding the
1410 * default pvscsi_ring_pages value.
1412 max_id = pvscsi_get_max_targets(adapter);
1413 printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
1415 if (pvscsi_ring_pages == 0)
1417 * Set the right default value. Up to 16 targets it is 8 pages; above that, the maximum.
1420 pvscsi_ring_pages = (max_id > 16) ?
1421 PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
1422 PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
1424 "vmw_pvscsi: setting ring_pages to %d\n",
1427 pvscsi_template.can_queue =
1428 min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
1429 PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
1430 pvscsi_template.cmd_per_lun =
1431 min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
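/*
 * can_queue is sized to the number of request-ring entries so that there
 * is exactly one pvscsi_ctx (and one SG page) per possible outstanding
 * request; cmd_per_lun can never exceed it.
 */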
1432 host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
1434 printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
1435 goto out_release_resources_and_disable;
1439 * Let's use the real pvscsi_adapter struct from here onwards.
1441 adapter = shost_priv(host);
1442 memset(adapter, 0, sizeof(*adapter));
1443 adapter->dev = pdev;
1444 adapter->host = host;
1446 * Copy back what we already have to the allocated adapter struct.
1448 adapter->rev = adapter_temp.rev;
1449 adapter->mmioBase = adapter_temp.mmioBase;
1451 spin_lock_init(&adapter->hw_lock);
1452 host->max_channel = 0;
1454 host->max_cmd_len = 16;
1455 host->max_id = max_id;
1457 pci_set_drvdata(pdev, host);
1459 ll_adapter_reset(adapter);
1461 adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
1463 error = pvscsi_allocate_rings(adapter);
1465 printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
1466 goto out_release_resources;
1470 * From this point on we should reset the adapter if anything goes wrong.
1473 pvscsi_setup_all_rings(adapter);
1475 adapter->cmd_map = kcalloc(adapter->req_depth,
1476 sizeof(struct pvscsi_ctx), GFP_KERNEL);
1477 if (!adapter->cmd_map) {
1478 printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
1480 goto out_reset_adapter;
1483 INIT_LIST_HEAD(&adapter->cmd_pool);
1484 for (i = 0; i < adapter->req_depth; i++) {
1485 struct pvscsi_ctx *ctx = adapter->cmd_map + i;
1486 list_add(&ctx->list, &adapter->cmd_pool);
1489 error = pvscsi_allocate_sg(adapter);
1491 printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
1492 goto out_reset_adapter;
1495 if (pvscsi_disable_msix)
1496 irq_flag &= ~PCI_IRQ_MSIX;
1497 if (pvscsi_disable_msi)
1498 irq_flag &= ~PCI_IRQ_MSI;
1500 error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
1502 goto out_reset_adapter;
1504 adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
1505 printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
1506 adapter->use_req_threshold ? "en" : "dis");
1508 if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) {
1509 printk(KERN_INFO "vmw_pvscsi: using MSI%s\n",
1510 adapter->dev->msix_enabled ? "-X" : "");
1511 error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr,
1512 0, "vmw_pvscsi", adapter);
1514 printk(KERN_INFO "vmw_pvscsi: using INTx\n");
1515 error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr,
1516 IRQF_SHARED, "vmw_pvscsi", adapter);
1521 "vmw_pvscsi: unable to request IRQ: %d\n", error);
1522 goto out_reset_adapter;
1525 error = scsi_add_host(host, &pdev->dev);
1528 "vmw_pvscsi: scsi_add_host failed: %d\n", error);
1529 goto out_reset_adapter;
1532 dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
1533 adapter->rev, host->host_no);
1535 pvscsi_unmask_intr(adapter);
1537 scsi_scan_host(host);
1542 ll_adapter_reset(adapter);
1543 out_release_resources:
1544 pvscsi_shutdown_intr(adapter);
1545 pvscsi_release_resources(adapter);
1546 scsi_host_put(host);
1548 pci_disable_device(pdev);
1552 out_release_resources_and_disable:
1553 pvscsi_shutdown_intr(adapter);
1554 pvscsi_release_resources(adapter);
1555 goto out_disable_device;
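/*
 * Common teardown used by both shutdown and remove: mask interrupts and
 * drain the message workqueue so no new work is generated, flush both
 * rings, and finally reset the adapter so the device stops touching ring
 * memory before it is released.
 */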
1558 static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
1560 pvscsi_mask_intr(adapter);
1562 if (adapter->workqueue)
1563 flush_workqueue(adapter->workqueue);
1565 pvscsi_shutdown_intr(adapter);
1567 pvscsi_process_request_ring(adapter);
1568 pvscsi_process_completion_ring(adapter);
1569 ll_adapter_reset(adapter);
1572 static void pvscsi_shutdown(struct pci_dev *dev)
1574 struct Scsi_Host *host = pci_get_drvdata(dev);
1575 struct pvscsi_adapter *adapter = shost_priv(host);
1577 __pvscsi_shutdown(adapter);
1580 static void pvscsi_remove(struct pci_dev *pdev)
1582 struct Scsi_Host *host = pci_get_drvdata(pdev);
1583 struct pvscsi_adapter *adapter = shost_priv(host);
1585 scsi_remove_host(host);
1587 __pvscsi_shutdown(adapter);
1588 pvscsi_release_resources(adapter);
1590 scsi_host_put(host);
1592 pci_disable_device(pdev);
1595 static struct pci_driver pvscsi_pci_driver = {
1596 .name = "vmw_pvscsi",
1597 .id_table = pvscsi_pci_tbl,
1598 .probe = pvscsi_probe,
1599 .remove = pvscsi_remove,
1600 .shutdown = pvscsi_shutdown,
1603 static int __init pvscsi_init(void)
1605 pr_info("%s - version %s\n",
1606 PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
1607 return pci_register_driver(&pvscsi_pci_driver);
1610 static void __exit pvscsi_exit(void)
1612 pci_unregister_driver(&pvscsi_pci_driver);
1615 module_init(pvscsi_init);
1616 module_exit(pvscsi_exit);