1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
11 #include <linux/kernel.h>
12 #include <linux/wait.h>
13 #include <linux/sched.h>
14 #include <linux/completion.h>
15 #include <linux/string.h>
17 #include <linux/delay.h>
18 #include <linux/init.h>
19 #include <linux/slab.h>
20 #include <linux/module.h>
21 #include <linux/device.h>
22 #include <linux/hyperv.h>
23 #include <linux/blkdev.h>
24 #include <scsi/scsi.h>
25 #include <scsi/scsi_cmnd.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_tcq.h>
29 #include <scsi/scsi_eh.h>
30 #include <scsi/scsi_devinfo.h>
31 #include <scsi/scsi_dbg.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/scsi_transport.h>
/*
 * All wire protocol details (storage protocol between the guest and the host)
 * are consolidated here.
 *
 * Begin protocol definitions.
 */
/*
 * Version history:
 * V1 RC < 2008/1/31: 1.0
 * V1 RC > 2008/1/31: 2.0
 */
#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_)	((((MAJOR_) & 0xff) << 8) | \
						(((MINOR_) & 0xff)))
56 #define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0)
57 #define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2)
58 #define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1)
59 #define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
60 #define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
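/*
 * For example (values follow directly from the macro above):
 * VMSTOR_PROTO_VERSION(6, 2) == 0x0602 and VMSTOR_PROTO_VERSION(5, 1) ==
 * 0x0501, i.e. the major version occupies the high byte and the minor
 * version the low byte, so numeric comparisons such as
 * "vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8" order correctly.
 */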
62 /* Packet structure describing virtual storage requests. */
63 enum vstor_packet_operation {
64 VSTOR_OPERATION_COMPLETE_IO = 1,
65 VSTOR_OPERATION_REMOVE_DEVICE = 2,
66 VSTOR_OPERATION_EXECUTE_SRB = 3,
67 VSTOR_OPERATION_RESET_LUN = 4,
68 VSTOR_OPERATION_RESET_ADAPTER = 5,
69 VSTOR_OPERATION_RESET_BUS = 6,
70 VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
71 VSTOR_OPERATION_END_INITIALIZATION = 8,
72 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
73 VSTOR_OPERATION_QUERY_PROPERTIES = 10,
74 VSTOR_OPERATION_ENUMERATE_BUS = 11,
75 VSTOR_OPERATION_FCHBA_DATA = 12,
76 VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13,
77 VSTOR_OPERATION_MAXIMUM = 13
81 * WWN packet for Fibre Channel HBA
84 struct hv_fc_wwn_packet {
87 u8 primary_port_wwn[8];
88 u8 primary_node_wwn[8];
89 u8 secondary_port_wwn[8];
90 u8 secondary_node_wwn[8];
99 #define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002
100 #define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004
101 #define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008
102 #define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010
103 #define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020
104 #define SRB_FLAGS_DATA_IN 0x00000040
105 #define SRB_FLAGS_DATA_OUT 0x00000080
106 #define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000
107 #define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT)
108 #define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100
109 #define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200
110 #define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400
113 * This flag indicates the request is part of the workflow for processing a D3.
115 #define SRB_FLAGS_D3_PROCESSING 0x00000800
116 #define SRB_FLAGS_IS_ACTIVE 0x00010000
117 #define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000
118 #define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000
119 #define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000
120 #define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000
121 #define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000
122 #define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000
123 #define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000
124 #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000
125 #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000
127 #define SP_UNTAGGED ((unsigned char) ~0)
128 #define SRB_SIMPLE_TAG_REQUEST 0x20
/*
 * Platform-neutral description of a SCSI request -
 * this remains the same across the wire regardless of 32/64 bit
 * note: it's patterned off the SCSI_PASS_THROUGH structure
 */
135 #define STORVSC_MAX_CMD_LEN 0x10
137 #define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14
138 #define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12
140 #define STORVSC_SENSE_BUFFER_SIZE 0x14
141 #define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
144 * Sense buffer size changed in win8; have a run-time
145 * variable to track the size we should use. This value will
146 * likely change during protocol negotiation but it is valid
147 * to start by assuming pre-Win8.
149 static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
152 * The storage protocol version is determined during the
153 * initial exchange with the host. It will indicate which
154 * storage functionality is available in the host.
156 static int vmstor_proto_version;
158 #define STORVSC_LOGGING_NONE 0
159 #define STORVSC_LOGGING_ERROR 1
160 #define STORVSC_LOGGING_WARN 2
162 static int logging_level = STORVSC_LOGGING_ERROR;
163 module_param(logging_level, int, S_IRUGO|S_IWUSR);
164 MODULE_PARM_DESC(logging_level,
165 "Logging level, 0 - None, 1 - Error (default), 2 - Warning.");
167 static inline bool do_logging(int level)
169 return logging_level >= level;
#define storvsc_log(dev, level, fmt, ...)			\
do {								\
	if (do_logging(level))					\
		dev_warn(&(dev)->device, fmt, ##__VA_ARGS__);	\
} while (0)
178 struct vmscsi_win8_extension {
180 * The following were added in Windows 8
190 struct vmscsi_request {
201 u8 sense_info_length;
205 u32 data_transfer_length;
208 u8 cdb[STORVSC_MAX_CMD_LEN];
209 u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
210 u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
213 * The following was added in win8.
215 struct vmscsi_win8_extension win8_extension;
217 } __attribute((packed));
/*
 * The size of the vmscsi_request has changed in win8. The
 * additional size is because of new elements added to the
 * structure. These elements are valid only when we are talking
 * to a win8 host.
 * Track the correction to size we need to apply. This value
 * will likely change during protocol negotiation but it is
 * valid to start by assuming pre-Win8.
 */
229 static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
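/*
 * Illustration: at the pre-Win8 starting value, the on-wire packet size is
 * sizeof(struct vstor_packet) - sizeof(struct vmscsi_win8_extension), i.e.
 * the win8_extension is trimmed from every packet. Once negotiation settles
 * on a Win8-or-later protocol, the delta drops to 0 (see the
 * vmstor_protocols table below) and the extension travels with each request.
 */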
232 * The list of storage protocols in order of preference.
234 struct vmstor_protocol {
235 int protocol_version;
236 int sense_buffer_size;
237 int vmscsi_size_delta;
static const struct vmstor_protocol vmstor_protocols[] = {
	{
		VMSTOR_PROTO_VERSION_WIN10,
		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
		0
	},
	{
		VMSTOR_PROTO_VERSION_WIN8_1,
		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
		0
	},
	{
		VMSTOR_PROTO_VERSION_WIN8,
		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
		0
	},
	{
		VMSTOR_PROTO_VERSION_WIN7,
		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
		sizeof(struct vmscsi_win8_extension),
	},
	{
		VMSTOR_PROTO_VERSION_WIN6,
		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
		sizeof(struct vmscsi_win8_extension),
	}
};
271 * This structure is sent during the initialization phase to get the different
272 * properties of the channel.
275 #define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1
277 struct vmstorage_channel_properties {
283 u32 max_transfer_bytes;
288 /* This structure is sent during the storage protocol negotiations. */
289 struct vmstorage_protocol_version {
290 /* Major (MSW) and minor (LSW) version numbers. */
/*
 * Revision number is auto-incremented whenever this file is changed
 * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
 * definitely indicate incompatibility--but it does indicate mismatched
 * builds.
 * This is only used on the Windows side. Just set it to 0.
 */
303 /* Channel Property Flags */
304 #define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
305 #define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
307 struct vstor_packet {
308 /* Requested operation type */
309 enum vstor_packet_operation operation;
311 /* Flags - see below for values */
314 /* Status of the request returned from the server side. */
317 /* Data payload area */
320 * Structure used to forward SCSI commands from the
321 * client to the server.
323 struct vmscsi_request vm_srb;
325 /* Structure used to query channel properties. */
326 struct vmstorage_channel_properties storage_channel_properties;
328 /* Used during version negotiations. */
329 struct vmstorage_protocol_version version;
331 /* Fibre channel address packet */
332 struct hv_fc_wwn_packet wwn_packet;
334 /* Number of sub-channels to create */
335 u16 sub_channel_count;
337 /* This will be the maximum of the union members */
/*
 * This flag indicates that the server should send back a completion for this
 * packet.
 */
#define REQUEST_COMPLETION_FLAG	0x1
351 /* Matches Windows-end */
352 enum storvsc_request_type {
359 * SRB status codes and masks. In the 8-bit field, the two high order bits
360 * are flags, while the remaining 6 bits are an integer status code. The
361 * definitions here include only the subset of the integer status codes that
362 * are tested for in this driver.
364 #define SRB_STATUS_AUTOSENSE_VALID 0x80
365 #define SRB_STATUS_QUEUE_FROZEN 0x40
367 /* SRB status integer codes */
368 #define SRB_STATUS_SUCCESS 0x01
369 #define SRB_STATUS_ABORTED 0x02
370 #define SRB_STATUS_ERROR 0x04
371 #define SRB_STATUS_INVALID_REQUEST 0x06
372 #define SRB_STATUS_DATA_OVERRUN 0x12
373 #define SRB_STATUS_INVALID_LUN 0x20
375 #define SRB_STATUS(status) \
376 (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
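/*
 * Worked example: a host reply of 0x84 is SRB_STATUS_AUTOSENSE_VALID | 0x04,
 * so SRB_STATUS(0x84) == 0x04 == SRB_STATUS_ERROR: the two flag bits are
 * masked off and only the 6-bit integer status code remains for comparison.
 */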
* This is the end of the protocol-specific defines.
381 static int storvsc_ringbuffer_size = (128 * 1024);
382 static u32 max_outstanding_req_per_channel;
383 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
385 static int storvsc_vcpus_per_sub_channel = 4;
387 module_param(storvsc_ringbuffer_size, int, S_IRUGO);
388 MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
390 module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
391 MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
393 static int ring_avail_percent_lowater = 10;
394 module_param(ring_avail_percent_lowater, int, S_IRUGO);
MODULE_PARM_DESC(ring_avail_percent_lowater,
		"Select a channel only if its available outbound ring space, in percent, exceeds this value");
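/*
 * For example, with the default of 10, storvsc_do_io() picks an
 * alternate channel only when more than 10% of its outbound ring
 * buffer is still writable (see the hv_get_avail_to_write_percent()
 * checks there).
 */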
399 * Timeout in seconds for all devices managed by this driver.
401 static int storvsc_timeout = 180;
403 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
404 static struct scsi_transport_template *fc_transport_template;
407 static void storvsc_on_channel_callback(void *context);
409 #define STORVSC_MAX_LUNS_PER_TARGET 255
410 #define STORVSC_MAX_TARGETS 2
411 #define STORVSC_MAX_CHANNELS 8
413 #define STORVSC_FC_MAX_LUNS_PER_TARGET 255
414 #define STORVSC_FC_MAX_TARGETS 128
415 #define STORVSC_FC_MAX_CHANNELS 8
417 #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
418 #define STORVSC_IDE_MAX_TARGETS 1
419 #define STORVSC_IDE_MAX_CHANNELS 1
421 struct storvsc_cmd_request {
422 struct scsi_cmnd *cmd;
424 struct hv_device *device;
426 /* Synchronize the request/response if needed */
427 struct completion wait_event;
429 struct vmbus_channel_packet_multipage_buffer mpb;
430 struct vmbus_packet_mpb_array *payload;
433 struct vstor_packet vstor_packet;
437 /* A storvsc device is a device object that contains a vmbus channel */
438 struct storvsc_device {
439 struct hv_device *device;
443 atomic_t num_outstanding_req;
444 struct Scsi_Host *host;
446 wait_queue_head_t waiting_to_drain;
/*
 * Each unique Port/Path/Target represents 1 channel, i.e. one SCSI
 * controller. In reality, the path_id and target_id are always 0
 * and the port number is set by us.
 */
453 unsigned int port_number;
454 unsigned char path_id;
455 unsigned char target_id;
/*
 * Max I/O the device can support.
 */
460 u32 max_transfer_bytes;
462 * Number of sub-channels we will open.
465 struct vmbus_channel **stor_chns;
467 * Mask of CPUs bound to subchannels.
469 struct cpumask alloced_cpus;
471 * Serializes modifications of stor_chns[] from storvsc_do_io()
472 * and storvsc_change_target_cpu().
475 /* Used for vsc/vsp channel reset process */
476 struct storvsc_cmd_request init_request;
477 struct storvsc_cmd_request reset_request;
479 * Currently active port and node names for FC devices.
483 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
484 struct fc_rport *rport;
488 struct hv_host_device {
489 struct hv_device *dev;
492 unsigned char target;
493 struct workqueue_struct *handle_error_wq;
494 struct work_struct host_scan_work;
495 struct Scsi_Host *host;
498 struct storvsc_scan_work {
499 struct work_struct work;
500 struct Scsi_Host *host;
505 static void storvsc_device_scan(struct work_struct *work)
507 struct storvsc_scan_work *wrk;
508 struct scsi_device *sdev;
510 wrk = container_of(work, struct storvsc_scan_work, work);
512 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
515 scsi_rescan_device(&sdev->sdev_gendev);
516 scsi_device_put(sdev);
522 static void storvsc_host_scan(struct work_struct *work)
524 struct Scsi_Host *host;
525 struct scsi_device *sdev;
526 struct hv_host_device *host_device =
527 container_of(work, struct hv_host_device, host_scan_work);
529 host = host_device->host;
531 * Before scanning the host, first check to see if any of the
* currently known devices have been hot removed. We issue a
533 * "unit ready" command against all currently known devices.
534 * This I/O will result in an error for devices that have been
535 * removed. As part of handling the I/O error, we remove the device.
537 * When a LUN is added or removed, the host sends us a signal to
538 * scan the host. Thus we are forced to discover the LUNs that
539 * may have been removed this way.
541 mutex_lock(&host->scan_mutex);
542 shost_for_each_device(sdev, host)
543 scsi_test_unit_ready(sdev, 1, 1, NULL);
544 mutex_unlock(&host->scan_mutex);
546 * Now scan the host to discover LUNs that may have been added.
548 scsi_scan_host(host);
551 static void storvsc_remove_lun(struct work_struct *work)
553 struct storvsc_scan_work *wrk;
554 struct scsi_device *sdev;
556 wrk = container_of(work, struct storvsc_scan_work, work);
557 if (!scsi_host_get(wrk->host))
560 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
563 scsi_remove_device(sdev);
564 scsi_device_put(sdev);
566 scsi_host_put(wrk->host);
574 * We can get incoming messages from the host that are not in response to
575 * messages that we have sent out. An example of this would be messages
576 * received by the guest to notify dynamic addition/removal of LUNs. To
577 * deal with potential race conditions where the driver may be in the
578 * midst of being unloaded when we might receive an unsolicited message
* from the host, we have implemented a mechanism to guarantee sequential
* message processing:
*
* 1) Once the device is marked as being destroyed, we will fail all
*    outgoing messages.
* 2) We permit incoming messages when the device is being destroyed,
*    only to properly account for messages already sent out.
*/
588 static inline struct storvsc_device *get_out_stor_device(
589 struct hv_device *device)
591 struct storvsc_device *stor_device;
593 stor_device = hv_get_drvdata(device);
595 if (stor_device && stor_device->destroy)
602 static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
604 dev->drain_notify = true;
605 wait_event(dev->waiting_to_drain,
606 atomic_read(&dev->num_outstanding_req) == 0);
607 dev->drain_notify = false;
610 static inline struct storvsc_device *get_in_stor_device(
611 struct hv_device *device)
613 struct storvsc_device *stor_device;
615 stor_device = hv_get_drvdata(device);
/*
 * If the device is being destroyed, allow incoming
 * traffic only to clean up outstanding requests.
 */
625 if (stor_device->destroy &&
626 (atomic_read(&stor_device->num_outstanding_req) == 0))
634 static void storvsc_change_target_cpu(struct vmbus_channel *channel, u32 old,
637 struct storvsc_device *stor_device;
638 struct vmbus_channel *cur_chn;
639 bool old_is_alloced = false;
640 struct hv_device *device;
644 device = channel->primary_channel ?
645 channel->primary_channel->device_obj
646 : channel->device_obj;
647 stor_device = get_out_stor_device(device);
651 /* See storvsc_do_io() -> get_og_chn(). */
652 spin_lock_irqsave(&stor_device->lock, flags);
/*
 * Determine if the storvsc device has other channels assigned to
 * the "old" CPU, to update the alloced_cpus mask and the stor_chns
 * array.
 */
if (device->channel != channel && device->channel->target_cpu == old) {
660 cur_chn = device->channel;
661 old_is_alloced = true;
664 list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) {
665 if (cur_chn == channel)
667 if (cur_chn->target_cpu == old) {
668 old_is_alloced = true;
675 WRITE_ONCE(stor_device->stor_chns[old], cur_chn);
677 cpumask_clear_cpu(old, &stor_device->alloced_cpus);
679 /* "Flush" the stor_chns array. */
680 for_each_possible_cpu(cpu) {
681 if (stor_device->stor_chns[cpu] && !cpumask_test_cpu(
682 cpu, &stor_device->alloced_cpus))
683 WRITE_ONCE(stor_device->stor_chns[cpu], NULL);
686 WRITE_ONCE(stor_device->stor_chns[new], channel);
687 cpumask_set_cpu(new, &stor_device->alloced_cpus);
689 spin_unlock_irqrestore(&stor_device->lock, flags);
692 static void handle_sc_creation(struct vmbus_channel *new_sc)
694 struct hv_device *device = new_sc->primary_channel->device_obj;
695 struct device *dev = &device->device;
696 struct storvsc_device *stor_device;
697 struct vmstorage_channel_properties props;
700 stor_device = get_out_stor_device(device);
704 memset(&props, 0, sizeof(struct vmstorage_channel_properties));
706 ret = vmbus_open(new_sc,
707 storvsc_ringbuffer_size,
708 storvsc_ringbuffer_size,
(void *)&props,
sizeof(struct vmstorage_channel_properties),
711 storvsc_on_channel_callback, new_sc);
713 /* In case vmbus_open() fails, we don't use the sub-channel. */
715 dev_err(dev, "Failed to open sub-channel: err=%d\n", ret);
719 new_sc->change_target_cpu_callback = storvsc_change_target_cpu;
721 /* Add the sub-channel to the array of available channels. */
722 stor_device->stor_chns[new_sc->target_cpu] = new_sc;
723 cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
726 static void handle_multichannel_storage(struct hv_device *device, int max_chns)
728 struct device *dev = &device->device;
729 struct storvsc_device *stor_device;
731 struct storvsc_cmd_request *request;
732 struct vstor_packet *vstor_packet;
736 * If the number of CPUs is artificially restricted, such as
737 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
738 * sub-channels >= the number of CPUs. These sub-channels
739 * should not be created. The primary channel is already created
740 * and assigned to one CPU, so check against # CPUs - 1.
742 num_sc = min((int)(num_online_cpus() - 1), max_chns);
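	/*
	 * For example: a host offering max_chns = 15 to a guest booted
	 * with maxcpus=4 yields num_sc = min(4 - 1, 15) = 3, so the three
	 * sub-channels plus the primary channel never exceed the 4 CPUs.
	 */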
746 stor_device = get_out_stor_device(device);
750 stor_device->num_sc = num_sc;
751 request = &stor_device->init_request;
752 vstor_packet = &request->vstor_packet;
755 * Establish a handler for dealing with subchannels.
757 vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
760 * Request the host to create sub-channels.
762 memset(request, 0, sizeof(struct storvsc_cmd_request));
763 init_completion(&request->wait_event);
764 vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS;
765 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
766 vstor_packet->sub_channel_count = num_sc;
ret = vmbus_sendpacket(device->channel, vstor_packet,
		       (sizeof(struct vstor_packet) -
		       vmscsi_size_delta),
		       (unsigned long)request,
		       VM_PKT_DATA_INBAND,
		       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
776 dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
780 t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
782 dev_err(dev, "Failed to create sub-channel: timed out\n");
786 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
787 vstor_packet->status != 0) {
788 dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
789 vstor_packet->operation, vstor_packet->status);
794 * We need to do nothing here, because vmbus_process_offer()
795 * invokes channel->sc_creation_callback, which will open and use
796 * the sub-channel(s).
800 static void cache_wwn(struct storvsc_device *stor_device,
801 struct vstor_packet *vstor_packet)
/*
 * Cache the currently active port and node worldwide names (WWNs).
 */
806 if (vstor_packet->wwn_packet.primary_active) {
807 stor_device->node_name =
808 wwn_to_u64(vstor_packet->wwn_packet.primary_node_wwn);
809 stor_device->port_name =
810 wwn_to_u64(vstor_packet->wwn_packet.primary_port_wwn);
812 stor_device->node_name =
813 wwn_to_u64(vstor_packet->wwn_packet.secondary_node_wwn);
814 stor_device->port_name =
815 wwn_to_u64(vstor_packet->wwn_packet.secondary_port_wwn);
820 static int storvsc_execute_vstor_op(struct hv_device *device,
821 struct storvsc_cmd_request *request,
824 struct vstor_packet *vstor_packet;
827 vstor_packet = &request->vstor_packet;
829 init_completion(&request->wait_event);
830 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
ret = vmbus_sendpacket(device->channel, vstor_packet,
		       (sizeof(struct vstor_packet) -
		       vmscsi_size_delta),
		       (unsigned long)request,
		       VM_PKT_DATA_INBAND,
		       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0)
	return -ETIMEDOUT;

if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
    vstor_packet->status != 0)
	return -EINVAL;
855 static int storvsc_channel_init(struct hv_device *device, bool is_fc)
857 struct storvsc_device *stor_device;
858 struct storvsc_cmd_request *request;
859 struct vstor_packet *vstor_packet;
862 bool process_sub_channels = false;
864 stor_device = get_out_stor_device(device);
868 request = &stor_device->init_request;
869 vstor_packet = &request->vstor_packet;
/*
 * Now, initiate the vsc/vsp initialization protocol on the open
 * channel.
 */
memset(request, 0, sizeof(struct storvsc_cmd_request));
876 vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
877 ret = storvsc_execute_vstor_op(device, request, true);
881 * Query host supported protocol version.
884 for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
885 /* reuse the packet for version range supported */
886 memset(vstor_packet, 0, sizeof(struct vstor_packet));
887 vstor_packet->operation =
888 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
890 vstor_packet->version.major_minor =
891 vmstor_protocols[i].protocol_version;
894 * The revision number is only used in Windows; set it to 0.
896 vstor_packet->version.revision = 0;
897 ret = storvsc_execute_vstor_op(device, request, false);
901 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO)
904 if (vstor_packet->status == 0) {
905 vmstor_proto_version =
906 vmstor_protocols[i].protocol_version;
909 vmstor_protocols[i].sense_buffer_size;
912 vmstor_protocols[i].vmscsi_size_delta;
918 if (vstor_packet->status != 0)
922 memset(vstor_packet, 0, sizeof(struct vstor_packet));
923 vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
924 ret = storvsc_execute_vstor_op(device, request, true);
/*
 * Check to see if multi-channel support is there.
 * Hosts that implement protocol version 5.1 and above
 * support multi-channel.
 */
933 max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
/*
 * Allocate state to manage the sub-channels.
 * We allocate an array based on the number of possible CPUs
 * (Hyper-V does not support cpu online/offline).
 * This array will be sparsely populated with unique
 * channels - primary + sub-channels.
 * We will however populate all the slots to evenly distribute
 * the load.
 */
944 stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *),
946 if (stor_device->stor_chns == NULL)
949 device->channel->change_target_cpu_callback = storvsc_change_target_cpu;
951 stor_device->stor_chns[device->channel->target_cpu] = device->channel;
952 cpumask_set_cpu(device->channel->target_cpu,
953 &stor_device->alloced_cpus);
955 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
956 if (vstor_packet->storage_channel_properties.flags &
957 STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
958 process_sub_channels = true;
960 stor_device->max_transfer_bytes =
961 vstor_packet->storage_channel_properties.max_transfer_bytes;
967 * For FC devices retrieve FC HBA data.
969 memset(vstor_packet, 0, sizeof(struct vstor_packet));
970 vstor_packet->operation = VSTOR_OPERATION_FCHBA_DATA;
971 ret = storvsc_execute_vstor_op(device, request, true);
/*
 * Cache the currently active port and node worldwide names (WWNs).
 */
978 cache_wwn(stor_device, vstor_packet);
982 memset(vstor_packet, 0, sizeof(struct vstor_packet));
983 vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
984 ret = storvsc_execute_vstor_op(device, request, true);
988 if (process_sub_channels)
989 handle_multichannel_storage(device, max_chns);
994 static void storvsc_handle_error(struct vmscsi_request *vm_srb,
995 struct scsi_cmnd *scmnd,
996 struct Scsi_Host *host,
999 struct storvsc_scan_work *wrk;
1000 void (*process_err_fn)(struct work_struct *work);
1001 struct hv_host_device *host_dev = shost_priv(host);
1003 switch (SRB_STATUS(vm_srb->srb_status)) {
1004 case SRB_STATUS_ERROR:
1005 case SRB_STATUS_ABORTED:
1006 case SRB_STATUS_INVALID_REQUEST:
1007 if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
1008 /* Check for capacity change */
1009 if ((asc == 0x2a) && (ascq == 0x9)) {
1010 process_err_fn = storvsc_device_scan;
1011 /* Retry the I/O that triggered this. */
1012 set_host_byte(scmnd, DID_REQUEUE);
1017 * Check for "Operating parameters have changed"
1018 * due to Hyper-V changing the VHD/VHDX BlockSize
1019 * when adding/removing a differencing disk. This
1020 * causes discard_granularity to change, so do a
1021 * rescan to pick up the new granularity. We don't
1022 * want scsi_report_sense() to output a message
1023 * that a sysadmin wouldn't know what to do with.
if ((asc == 0x3f) && (ascq != 0x03) &&
    (ascq != 0x0e)) {
1027 process_err_fn = storvsc_device_scan;
1028 set_host_byte(scmnd, DID_REQUEUE);
1033 * Otherwise, let upper layer deal with the
1034 * error when sense message is present
/*
 * If there is an error, offline the device since all
 * error recovery strategies would have already been
 * deployed on the host side. However, if the command
 * was a pass-through command, deal with it appropriately.
 */
1045 switch (scmnd->cmnd[0]) {
1048 set_host_byte(scmnd, DID_PASSTHROUGH);
1051 * On some Hyper-V hosts TEST_UNIT_READY command can
1052 * return SRB_STATUS_ERROR. Let the upper level code
1053 * deal with it based on the sense information.
1055 case TEST_UNIT_READY:
1058 set_host_byte(scmnd, DID_ERROR);
1062 case SRB_STATUS_INVALID_LUN:
1063 set_host_byte(scmnd, DID_NO_CONNECT);
1064 process_err_fn = storvsc_remove_lun;
/*
 * Schedule work to process this error.
 */
1074 wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
1076 set_host_byte(scmnd, DID_TARGET_FAILURE);
wrk->host = host;
wrk->lun = vm_srb->lun;
1082 wrk->tgt_id = vm_srb->target_id;
1083 INIT_WORK(&wrk->work, process_err_fn);
1084 queue_work(host_dev->handle_error_wq, &wrk->work);
1088 static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
1089 struct storvsc_device *stor_dev)
1091 struct scsi_cmnd *scmnd = cmd_request->cmd;
1092 struct scsi_sense_hdr sense_hdr;
1093 struct vmscsi_request *vm_srb;
1094 u32 data_transfer_length;
1095 struct Scsi_Host *host;
1096 u32 payload_sz = cmd_request->payload_sz;
1097 void *payload = cmd_request->payload;
1099 host = stor_dev->host;
1101 vm_srb = &cmd_request->vstor_packet.vm_srb;
1102 data_transfer_length = vm_srb->data_transfer_length;
1104 scmnd->result = vm_srb->scsi_status;
1106 if (scmnd->result) {
1107 if (scsi_normalize_sense(scmnd->sense_buffer,
1108 SCSI_SENSE_BUFFERSIZE, &sense_hdr) &&
1109 !(sense_hdr.sense_key == NOT_READY &&
sense_hdr.asc == 0x3A) &&	/* medium not present */
1111 do_logging(STORVSC_LOGGING_ERROR))
1112 scsi_print_sense_hdr(scmnd->device, "storvsc",
1116 if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
1117 storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
/*
 * The Windows driver sets data_transfer_length on
 * SRB_STATUS_DATA_OVERRUN. On other errors, this value
 * is untouched. In these cases we set it to 0.
 */
1124 if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
1125 data_transfer_length = 0;
1128 /* Validate data_transfer_length (from Hyper-V) */
1129 if (data_transfer_length > cmd_request->payload->range.len)
1130 data_transfer_length = cmd_request->payload->range.len;
1132 scsi_set_resid(scmnd,
1133 cmd_request->payload->range.len - data_transfer_length);
1135 scmnd->scsi_done(scmnd);
if (payload_sz >
		sizeof(struct vmbus_channel_packet_multipage_buffer))
	kfree(payload);
1142 static void storvsc_on_io_completion(struct storvsc_device *stor_device,
1143 struct vstor_packet *vstor_packet,
1144 struct storvsc_cmd_request *request)
1146 struct vstor_packet *stor_pkt;
1147 struct hv_device *device = stor_device->device;
1149 stor_pkt = &request->vstor_packet;
1152 * The current SCSI handling on the host side does
1153 * not correctly handle:
1154 * INQUIRY command with page code parameter set to 0x80
1155 * MODE_SENSE command with cmd[2] == 0x1c
1157 * Setup srb and scsi status so this won't be fatal.
* We do this so we can distinguish truly fatal failures
1159 * (srb status == 0x4) and off-line the device in that case.
1162 if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
1163 (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
1164 vstor_packet->vm_srb.scsi_status = 0;
1165 vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
/* Copy over the status, etc. */
1170 stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
1171 stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
1173 /* Validate sense_info_length (from Hyper-V) */
1174 if (vstor_packet->vm_srb.sense_info_length > sense_buffer_size)
1175 vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
1177 stor_pkt->vm_srb.sense_info_length =
1178 vstor_packet->vm_srb.sense_info_length;
1180 if (vstor_packet->vm_srb.scsi_status != 0 ||
1181 vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS)
1182 storvsc_log(device, STORVSC_LOGGING_WARN,
1183 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
1184 stor_pkt->vm_srb.cdb[0],
1185 vstor_packet->vm_srb.scsi_status,
1186 vstor_packet->vm_srb.srb_status);
1188 if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
1189 /* CHECK_CONDITION */
1190 if (vstor_packet->vm_srb.srb_status &
1191 SRB_STATUS_AUTOSENSE_VALID) {
1192 /* autosense data available */
1194 storvsc_log(device, STORVSC_LOGGING_WARN,
1195 "stor pkt %p autosense data valid - len %d\n",
1196 request, vstor_packet->vm_srb.sense_info_length);
1198 memcpy(request->cmd->sense_buffer,
1199 vstor_packet->vm_srb.sense_data,
1200 vstor_packet->vm_srb.sense_info_length);
1205 stor_pkt->vm_srb.data_transfer_length =
1206 vstor_packet->vm_srb.data_transfer_length;
1208 storvsc_command_completion(request, stor_device);
1210 if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
1211 stor_device->drain_notify)
1212 wake_up(&stor_device->waiting_to_drain);
1217 static void storvsc_on_receive(struct storvsc_device *stor_device,
1218 struct vstor_packet *vstor_packet,
1219 struct storvsc_cmd_request *request)
1221 struct hv_host_device *host_dev;
1222 switch (vstor_packet->operation) {
1223 case VSTOR_OPERATION_COMPLETE_IO:
1224 storvsc_on_io_completion(stor_device, vstor_packet, request);
1227 case VSTOR_OPERATION_REMOVE_DEVICE:
1228 case VSTOR_OPERATION_ENUMERATE_BUS:
1229 host_dev = shost_priv(stor_device->host);
queue_work(host_dev->handle_error_wq, &host_dev->host_scan_work);
1234 case VSTOR_OPERATION_FCHBA_DATA:
1235 cache_wwn(stor_device, vstor_packet);
1236 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
1237 fc_host_node_name(stor_device->host) = stor_device->node_name;
1238 fc_host_port_name(stor_device->host) = stor_device->port_name;
1246 static void storvsc_on_channel_callback(void *context)
1248 struct vmbus_channel *channel = (struct vmbus_channel *)context;
1249 const struct vmpacket_descriptor *desc;
1250 struct hv_device *device;
1251 struct storvsc_device *stor_device;
1253 if (channel->primary_channel != NULL)
1254 device = channel->primary_channel->device_obj;
1256 device = channel->device_obj;
1258 stor_device = get_in_stor_device(device);
1262 foreach_vmbus_pkt(desc, channel) {
1263 void *packet = hv_pkt_data(desc);
1264 struct storvsc_cmd_request *request;
1266 request = (struct storvsc_cmd_request *)
1267 ((unsigned long)desc->trans_id);
1269 if (request == &stor_device->init_request ||
1270 request == &stor_device->reset_request) {
1271 memcpy(&request->vstor_packet, packet,
1272 (sizeof(struct vstor_packet) - vmscsi_size_delta));
1273 complete(&request->wait_event);
1275 storvsc_on_receive(stor_device, packet, request);
1280 static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
1283 struct vmstorage_channel_properties props;
1286 memset(&props, 0, sizeof(struct vmstorage_channel_properties));
ret = vmbus_open(device->channel,
		 ring_size,
		 ring_size,
		 (void *)&props,
		 sizeof(struct vmstorage_channel_properties),
		 storvsc_on_channel_callback, device->channel);
1298 ret = storvsc_channel_init(device, is_fc);
1303 static int storvsc_dev_remove(struct hv_device *device)
1305 struct storvsc_device *stor_device;
1307 stor_device = hv_get_drvdata(device);
1309 stor_device->destroy = true;
1311 /* Make sure flag is set before waiting */
* At this point, all outbound traffic should be disabled. We
1316 * only allow inbound traffic (responses) to proceed so that
1317 * outstanding requests can be completed.
1320 storvsc_wait_to_drain(stor_device);
1323 * Since we have already drained, we don't need to busy wait
* as was done in final_release_stor_device().
1325 * Note that we cannot set the ext pointer to NULL until
1326 * we have drained - to drain the outgoing packets, we need to
1327 * allow incoming packets.
1329 hv_set_drvdata(device, NULL);
1331 /* Close the channel */
1332 vmbus_close(device->channel);
1334 kfree(stor_device->stor_chns);
1339 static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
1344 const struct cpumask *node_mask;
1345 int num_channels, tgt_cpu;
1347 if (stor_device->num_sc == 0) {
1348 stor_device->stor_chns[q_num] = stor_device->device->channel;
1349 return stor_device->device->channel;
/*
 * Our channel array is sparsely populated and we
 * initiated I/O on a processor/hw-q that does not
 * currently have a designated channel. Fix this.
 * The strategy is simple:
 * I. Ensure NUMA locality
 * II. Distribute evenly (best effort)
 */
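/*
 * Illustration of step II, assuming two channels are alloced on this
 * NUMA node: the modulo reduction of q_num below yields hash_qnum in
 * {0, 1}, and the walk over alloced_cpus then binds q_num's slot to
 * the first or second node-local channel, so consecutive hardware
 * queues spread evenly across those channels.
 */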
1361 node_mask = cpumask_of_node(cpu_to_node(q_num));
num_channels = 0;
for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
	if (cpumask_test_cpu(tgt_cpu, node_mask))
		num_channels++;
}
if (num_channels == 0) {
	stor_device->stor_chns[q_num] = stor_device->device->channel;
	return stor_device->device->channel;
}

hash_qnum = q_num;
while (hash_qnum >= num_channels)
	hash_qnum -= num_channels;

slot = 0;
for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
	if (!cpumask_test_cpu(tgt_cpu, node_mask))
		continue;
	if (slot == hash_qnum)
		break;
	slot++;
}
1385 stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu];
1387 return stor_device->stor_chns[q_num];
1391 static int storvsc_do_io(struct hv_device *device,
1392 struct storvsc_cmd_request *request, u16 q_num)
1394 struct storvsc_device *stor_device;
1395 struct vstor_packet *vstor_packet;
1396 struct vmbus_channel *outgoing_channel, *channel;
1397 unsigned long flags;
1399 const struct cpumask *node_mask;
1402 vstor_packet = &request->vstor_packet;
1403 stor_device = get_out_stor_device(device);
1409 request->device = device;
1411 * Select an appropriate channel to send the request out.
1413 /* See storvsc_change_target_cpu(). */
1414 outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
1415 if (outgoing_channel != NULL) {
1416 if (outgoing_channel->target_cpu == q_num) {
1418 * Ideally, we want to pick a different channel if
1419 * available on the same NUMA node.
1421 node_mask = cpumask_of_node(cpu_to_node(q_num));
1422 for_each_cpu_wrap(tgt_cpu,
1423 &stor_device->alloced_cpus, q_num + 1) {
1424 if (!cpumask_test_cpu(tgt_cpu, node_mask))
1426 if (tgt_cpu == q_num)
1428 channel = READ_ONCE(
1429 stor_device->stor_chns[tgt_cpu]);
1430 if (channel == NULL)
if (hv_get_avail_to_write_percent(
		&channel->outbound)
		> ring_avail_percent_lowater) {
1435 outgoing_channel = channel;
1441 * All the other channels on the same NUMA node are
1442 * busy. Try to use the channel on the current CPU
1444 if (hv_get_avail_to_write_percent(
1445 &outgoing_channel->outbound)
1446 > ring_avail_percent_lowater)
/*
 * If we reach here, all the channels on the current
 * NUMA node are busy. Try to find a channel in
 * other NUMA nodes.
 */
1454 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
1455 if (cpumask_test_cpu(tgt_cpu, node_mask))
1457 channel = READ_ONCE(
1458 stor_device->stor_chns[tgt_cpu]);
1459 if (channel == NULL)
if (hv_get_avail_to_write_percent(
		&channel->outbound)
		> ring_avail_percent_lowater) {
1464 outgoing_channel = channel;
1470 spin_lock_irqsave(&stor_device->lock, flags);
1471 outgoing_channel = stor_device->stor_chns[q_num];
1472 if (outgoing_channel != NULL) {
1473 spin_unlock_irqrestore(&stor_device->lock, flags);
1476 outgoing_channel = get_og_chn(stor_device, q_num);
1477 spin_unlock_irqrestore(&stor_device->lock, flags);
1481 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
			       vmscsi_size_delta);
1487 vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
1490 vstor_packet->vm_srb.data_transfer_length =
1491 request->payload->range.len;
1493 vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
1495 if (request->payload->range.len) {
ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
				request->payload, request->payload_sz,
				vstor_packet,
				(sizeof(struct vstor_packet) -
				vmscsi_size_delta),
				(unsigned long)request);
ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
		       (sizeof(struct vstor_packet) -
		       vmscsi_size_delta),
		       (unsigned long)request,
		       VM_PKT_DATA_INBAND,
		       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1515 atomic_inc(&stor_device->num_outstanding_req);
1520 static int storvsc_device_alloc(struct scsi_device *sdevice)
1523 * Set blist flag to permit the reading of the VPD pages even when
1524 * the target may claim SPC-2 compliance. MSFT targets currently
1525 * claim SPC-2 compliance while they implement post SPC-2 features.
1526 * With this flag we can correctly handle WRITE_SAME_16 issues.
* Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
* still supports REPORT LUNS.
1531 sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
1536 static int storvsc_device_configure(struct scsi_device *sdevice)
1538 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
1540 /* storvsc devices don't support MAINTENANCE_IN SCSI cmd */
1541 sdevice->no_report_opcodes = 1;
1542 sdevice->no_write_same = 1;
1545 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
1546 * if the device is a MSFT virtual device. If the host is
1547 * WIN10 or newer, allow write_same.
1549 if (!strncmp(sdevice->vendor, "Msft", 4)) {
1550 switch (vmstor_proto_version) {
1551 case VMSTOR_PROTO_VERSION_WIN8:
1552 case VMSTOR_PROTO_VERSION_WIN8_1:
1553 sdevice->scsi_level = SCSI_SPC_3;
1557 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10)
1558 sdevice->no_write_same = 0;
1564 static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
1565 sector_t capacity, int *info)
1567 sector_t nsect = capacity;
1568 sector_t cylinders = nsect;
int heads, sectors_pt;

/*
 * We are making up these values; let us keep it simple.
 */
heads = 0xff;		/* 255 heads */
sectors_pt = 0x3f;	/* Sectors per track */
sector_div(cylinders, heads * sectors_pt);
if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
	cylinders++;

info[0] = heads;
info[1] = sectors_pt;
info[2] = (int)cylinders;
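/*
 * Worked example: a 20 GiB disk has nsect = 41943040; with heads = 255
 * and sectors_pt = 63, cylinders = 41943040 / 16065 = 2610. Since
 * 2611 * 16065 is not below nsect, no rounding up occurs, and info[]
 * becomes {255, 63, 2610}.
 */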
1587 static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
1589 struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
1590 struct hv_device *device = host_dev->dev;
1592 struct storvsc_device *stor_device;
1593 struct storvsc_cmd_request *request;
1594 struct vstor_packet *vstor_packet;
1598 stor_device = get_out_stor_device(device);
1602 request = &stor_device->reset_request;
1603 vstor_packet = &request->vstor_packet;
1604 memset(vstor_packet, 0, sizeof(struct vstor_packet));
1606 init_completion(&request->wait_event);
1608 vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
1609 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
1610 vstor_packet->vm_srb.path_id = stor_device->path_id;
ret = vmbus_sendpacket(device->channel, vstor_packet,
		       (sizeof(struct vstor_packet) -
		       vmscsi_size_delta),
		       (unsigned long)&stor_device->reset_request,
		       VM_PKT_DATA_INBAND,
		       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1621 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0)
	return TIMEOUT_ERROR;
/*
 * At this point, all outstanding requests in the adapter
 * should have been flushed out and returned to us.
 * There is a potential race here where the host may be in
 * the process of responding when we return from here.
 * Just wait for all in-transit packets to be accounted for
 * before we return.
 */
1634 storvsc_wait_to_drain(stor_device);
1640 * The host guarantees to respond to each command, although I/O latencies might
1641 * be unbounded on Azure. Reset the timer unconditionally to give the host a
1642 * chance to perform EH.
1644 static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
1646 return BLK_EH_RESET_TIMER;
1649 static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
1651 bool allowed = true;
1652 u8 scsi_op = scmnd->cmnd[0];
/* the host does not handle WRITE_SAME, log accidental usage */
1658 * smartd sends this command and the host does not handle
1659 * this. So, don't send it.
1662 scmnd->result = ILLEGAL_REQUEST << 16;
1671 static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1674 struct hv_host_device *host_dev = shost_priv(host);
1675 struct hv_device *dev = host_dev->dev;
1676 struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
1678 struct scatterlist *sgl;
1679 unsigned int sg_count = 0;
1680 struct vmscsi_request *vm_srb;
1681 struct scatterlist *cur_sgl;
1682 struct vmbus_packet_mpb_array *payload;
1686 if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
1688 * On legacy hosts filter unimplemented commands.
1689 * Future hosts are expected to correctly handle
1690 * unsupported commands. Furthermore, it is
1691 * possible that some of the currently
* unsupported commands may be supported in
1693 * future versions of the host.
1695 if (!storvsc_scsi_cmd_ok(scmnd)) {
1696 scmnd->scsi_done(scmnd);
1701 /* Setup the cmd request */
1702 cmd_request->cmd = scmnd;
1704 memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet));
1705 vm_srb = &cmd_request->vstor_packet.vm_srb;
1706 vm_srb->win8_extension.time_out_value = 60;
1708 vm_srb->win8_extension.srb_flags |=
1709 SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
1711 if (scmnd->device->tagged_supported) {
1712 vm_srb->win8_extension.srb_flags |=
1713 (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
1714 vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
1715 vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
1719 switch (scmnd->sc_data_direction) {
1721 vm_srb->data_in = WRITE_TYPE;
1722 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
1724 case DMA_FROM_DEVICE:
1725 vm_srb->data_in = READ_TYPE;
1726 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
1729 vm_srb->data_in = UNKNOWN_TYPE;
1730 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
1734 * This is DMA_BIDIRECTIONAL or something else we are never
1735 * supposed to see here.
1737 WARN(1, "Unexpected data direction: %d\n",
1738 scmnd->sc_data_direction);
1743 vm_srb->port_number = host_dev->port;
1744 vm_srb->path_id = scmnd->device->channel;
1745 vm_srb->target_id = scmnd->device->id;
1746 vm_srb->lun = scmnd->device->lun;
1748 vm_srb->cdb_length = scmnd->cmd_len;
1750 memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
1752 sgl = (struct scatterlist *)scsi_sglist(scmnd);
1753 sg_count = scsi_sg_count(scmnd);
1755 length = scsi_bufflen(scmnd);
1756 payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
1760 unsigned int hvpgoff = 0;
1761 unsigned long offset_in_hvpg = sgl->offset & ~HV_HYP_PAGE_MASK;
1762 unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
1765 payload_sz = (hvpg_count * sizeof(u64) +
1766 sizeof(struct vmbus_packet_mpb_array));
1768 if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
payload = kzalloc(payload_sz, GFP_ATOMIC);
if (!payload)
	return SCSI_MLQUEUE_DEVICE_BUSY;
/*
 * sgl is a list of PAGEs, and payload->range.pfn_array
 * expects the page number in the unit of HV_HYP_PAGE_SIZE (the
 * page size that Hyper-V uses), so here we need to divide PAGEs
 * into HV_HYP_PAGEs in case PAGE_SIZE > HV_HYP_PAGE_SIZE.
 * Besides, payload->range.offset should be the offset in one
 * HV_HYP_PAGE.
 */
1782 payload->range.len = length;
1783 payload->range.offset = offset_in_hvpg;
1784 hvpgoff = sgl->offset >> HV_HYP_PAGE_SHIFT;
1787 for (i = 0; i < hvpg_count; i++) {
* 'i' is the index of hv pages in the payload and
* 'hvpgoff' is the offset (in hv pages) of the first
* hv page in the first page. The relationship
* between the sum of 'i' and 'hvpgoff' and the offset
* (in hv pages) in a payload page ('hvpgoff_in_page')
* is as follows:
*
1796 * |------------------ PAGE -------------------|
1797 * | NR_HV_HYP_PAGES_IN_PAGE hvpgs in total |
1798 * |hvpg|hvpg| ... |hvpg|... |hvpg|
1800 * +-hvpgoff-+ +-hvpgoff_in_page-+
1802 * +--------------------- i ---------------------------+
1804 unsigned int hvpgoff_in_page =
1805 (i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;
* Two cases that we need to fetch a page:
* 1) i == 0: the first step, or
* 2) hvpgoff_in_page == 0: when we reach the boundary
*    of a page.
*/
1813 if (hvpgoff_in_page == 0 || i == 0) {
1814 hvpfn = page_to_hvpfn(sg_page(cur_sgl));
1815 cur_sgl = sg_next(cur_sgl);
1818 payload->range.pfn_array[i] = hvpfn + hvpgoff_in_page;
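/*
 * Worked example, assuming 16K PAGEs over 4K Hyper-V pages
 * (NR_HV_HYP_PAGES_IN_PAGE == 4): with sgl->offset == 0x1200 and
 * length == 0x3000, offset_in_hvpg == 0x200, hvpgoff == 1 and
 * hvpg_count == 4. Iterations i = 0..2 emit hvpfn + 1..3 from the
 * first PAGE; at i == 3, hvpgoff_in_page wraps to 0 and the next
 * PAGE is fetched from the scatterlist.
 */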
1822 cmd_request->payload = payload;
1823 cmd_request->payload_sz = payload_sz;
1825 /* Invokes the vsc to start an IO */
1826 ret = storvsc_do_io(dev, cmd_request, get_cpu());
1829 if (ret == -EAGAIN) {
if (payload_sz > sizeof(cmd_request->mpb))
	kfree(payload);
/* no more space */
1833 return SCSI_MLQUEUE_DEVICE_BUSY;
1839 static struct scsi_host_template scsi_driver = {
1840 .module = THIS_MODULE,
1841 .name = "storvsc_host_t",
1842 .cmd_size = sizeof(struct storvsc_cmd_request),
1843 .bios_param = storvsc_get_chs,
1844 .queuecommand = storvsc_queuecommand,
1845 .eh_host_reset_handler = storvsc_host_reset_handler,
1846 .proc_name = "storvsc_host",
1847 .eh_timed_out = storvsc_eh_timed_out,
1848 .slave_alloc = storvsc_device_alloc,
1849 .slave_configure = storvsc_device_configure,
1850 .cmd_per_lun = 2048,
/* Make sure we don't get a sg segment that crosses a page boundary */
1853 .dma_boundary = PAGE_SIZE-1,
1854 /* Ensure there are no gaps in presented sgls */
1855 .virt_boundary_mask = PAGE_SIZE-1,
1857 .track_queue_depth = 1,
1858 .change_queue_depth = storvsc_change_queue_depth,
1867 static const struct hv_vmbus_device_id id_table[] = {
1870 .driver_data = SCSI_GUID
1874 .driver_data = IDE_GUID
1876 /* Fibre Channel GUID */
1879 .driver_data = SFC_GUID
1884 MODULE_DEVICE_TABLE(vmbus, id_table);
1886 static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID };
1888 static bool hv_dev_is_fc(struct hv_device *hv_dev)
1890 return guid_equal(&fc_guid.guid, &hv_dev->dev_type);
1893 static int storvsc_probe(struct hv_device *device,
1894 const struct hv_vmbus_device_id *dev_id)
1897 int num_cpus = num_online_cpus();
1898 struct Scsi_Host *host;
1899 struct hv_host_device *host_dev;
1900 bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
1901 bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false);
1903 struct storvsc_device *stor_device;
1904 int max_luns_per_target;
1907 int max_sub_channels = 0;
/*
 * Based on the Windows host we are running on,
 * set state to properly communicate with the host.
 */
1914 if (vmbus_proto_version < VERSION_WIN8) {
1915 max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
1916 max_targets = STORVSC_IDE_MAX_TARGETS;
1917 max_channels = STORVSC_IDE_MAX_CHANNELS;
1919 max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
1920 max_targets = STORVSC_MAX_TARGETS;
1921 max_channels = STORVSC_MAX_CHANNELS;
/*
 * On Windows 8 and above, we support sub-channels for storage
 * on SCSI and FC controllers.
 * The number of sub-channels offered is based on the number of
 * VCPUs in the guest.
 */
max_sub_channels =
	(num_cpus - 1) / storvsc_vcpus_per_sub_channel;
1933 scsi_driver.can_queue = max_outstanding_req_per_channel *
1934 (max_sub_channels + 1) *
1935 (100 - ring_avail_percent_lowater) / 100;
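/*
 * Worked example with assumed numbers: a 16-VCPU guest with the
 * default storvsc_vcpus_per_sub_channel of 4 gets max_sub_channels =
 * (16 - 1) / 4 = 3; if max_outstanding_req_per_channel worked out to,
 * say, 200, then can_queue = 200 * (3 + 1) * (100 - 10) / 100 = 720.
 */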
1937 host = scsi_host_alloc(&scsi_driver,
1938 sizeof(struct hv_host_device));
1942 host_dev = shost_priv(host);
1943 memset(host_dev, 0, sizeof(struct hv_host_device));
1945 host_dev->port = host->host_no;
1946 host_dev->dev = device;
1947 host_dev->host = host;
1950 stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
1956 stor_device->destroy = false;
1957 init_waitqueue_head(&stor_device->waiting_to_drain);
1958 stor_device->device = device;
1959 stor_device->host = host;
1960 spin_lock_init(&stor_device->lock);
1961 hv_set_drvdata(device, stor_device);
1963 stor_device->port_number = host->host_no;
1964 ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
1968 host_dev->path = stor_device->path_id;
1969 host_dev->target = stor_device->target_id;
1971 switch (dev_id->driver_data) {
1973 host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
1974 host->max_id = STORVSC_FC_MAX_TARGETS;
1975 host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
1976 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
1977 host->transportt = fc_transport_template;
1982 host->max_lun = max_luns_per_target;
1983 host->max_id = max_targets;
1984 host->max_channel = max_channels - 1;
1988 host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
1989 host->max_id = STORVSC_IDE_MAX_TARGETS;
1990 host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
1993 /* max cmd length */
1994 host->max_cmd_len = STORVSC_MAX_CMD_LEN;
/*
 * Set the table size based on the info we got
 * from the host.
 */
host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
2002 * For non-IDE disks, the host supports multiple channels.
2003 * Set the number of HW queues we are supporting.
2006 host->nr_hw_queues = num_present_cpus();
2009 * Set the error handler work queue.
2011 host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
			WQ_MEM_RECLAIM,
			host->host_no);
2015 if (!host_dev->handle_error_wq) {
2019 INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
2020 /* Register the HBA and start the scsi bus scan */
2021 ret = scsi_add_host(host, &device->device);
2026 scsi_scan_host(host);
2028 target = (device->dev_instance.b[5] << 8 |
2029 device->dev_instance.b[4]);
2030 ret = scsi_add_device(host, 0, target, 0);
2034 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2035 if (host->transportt == fc_transport_template) {
2036 struct fc_rport_identifiers ids = {
2037 .roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR,
2040 fc_host_node_name(host) = stor_device->node_name;
2041 fc_host_port_name(host) = stor_device->port_name;
2042 stor_device->rport = fc_remote_port_add(host, 0, &ids);
2043 if (!stor_device->rport) {
2052 scsi_remove_host(host);
2055 destroy_workqueue(host_dev->handle_error_wq);
* Once we have connected with the host, we would need
* to invoke storvsc_dev_remove() to roll back this state and
* this call also frees up the stor_device; hence the jump around
* the error label below.
2064 storvsc_dev_remove(device);
2068 kfree(stor_device->stor_chns);
2072 scsi_host_put(host);
2076 /* Change a scsi target's queue depth */
2077 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth)
2079 if (queue_depth > scsi_driver.can_queue)
2080 queue_depth = scsi_driver.can_queue;
2082 return scsi_change_queue_depth(sdev, queue_depth);
2085 static int storvsc_remove(struct hv_device *dev)
2087 struct storvsc_device *stor_device = hv_get_drvdata(dev);
2088 struct Scsi_Host *host = stor_device->host;
2089 struct hv_host_device *host_dev = shost_priv(host);
2091 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2092 if (host->transportt == fc_transport_template) {
2093 fc_remote_port_delete(stor_device->rport);
2094 fc_remove_host(host);
2097 destroy_workqueue(host_dev->handle_error_wq);
2098 scsi_remove_host(host);
2099 storvsc_dev_remove(dev);
2100 scsi_host_put(host);
2105 static int storvsc_suspend(struct hv_device *hv_dev)
2107 struct storvsc_device *stor_device = hv_get_drvdata(hv_dev);
2108 struct Scsi_Host *host = stor_device->host;
2109 struct hv_host_device *host_dev = shost_priv(host);
2111 storvsc_wait_to_drain(stor_device);
2113 drain_workqueue(host_dev->handle_error_wq);
2115 vmbus_close(hv_dev->channel);
2117 kfree(stor_device->stor_chns);
2118 stor_device->stor_chns = NULL;
2120 cpumask_clear(&stor_device->alloced_cpus);
2125 static int storvsc_resume(struct hv_device *hv_dev)
2129 ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
2130 hv_dev_is_fc(hv_dev));
2134 static struct hv_driver storvsc_drv = {
2135 .name = KBUILD_MODNAME,
2136 .id_table = id_table,
2137 .probe = storvsc_probe,
2138 .remove = storvsc_remove,
2139 .suspend = storvsc_suspend,
2140 .resume = storvsc_resume,
2142 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
2146 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2147 static struct fc_function_template fc_transport_functions = {
2148 .show_host_node_name = 1,
2149 .show_host_port_name = 1,
2153 static int __init storvsc_drv_init(void)
2158 * Divide the ring buffer data size (which is 1 page less
2159 * than the ring buffer size since that page is reserved for
2160 * the ring buffer indices) by the max request size (which is
2161 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
max_outstanding_req_per_channel =
	((storvsc_ringbuffer_size - PAGE_SIZE) /
	ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
	      sizeof(struct vstor_packet) + sizeof(u64) -
	      vmscsi_size_delta,
	      sizeof(u64)));
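/*
 * In other words: the ring's data area (its total size minus the one
 * page of ring indices) divided by the worst-case on-wire footprint of
 * a single request (multi-page buffer descriptor + vstor packet +
 * transaction id), rounded up to an 8-byte multiple. With the default
 * 128 KB ring and 4 KB pages, the numerator is 126976 bytes.
 */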
2170 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2171 fc_transport_template = fc_attach_transport(&fc_transport_functions);
2172 if (!fc_transport_template)
2176 ret = vmbus_driver_register(&storvsc_drv);
2178 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2180 fc_release_transport(fc_transport_template);
2186 static void __exit storvsc_drv_exit(void)
2188 vmbus_driver_unregister(&storvsc_drv);
2189 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2190 fc_release_transport(fc_transport_template);
2194 MODULE_LICENSE("GPL");
2195 MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
2196 module_init(storvsc_drv_init);
2197 module_exit(storvsc_drv_exit);