2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/pci.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/rtc.h>
26 #include <linux/bcd.h>
27 #include <linux/cciss_ioctl.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_eh.h>
32 #include <scsi/scsi_transport_sas.h>
33 #include <asm/unaligned.h>
35 #include "smartpqi_sis.h"
37 #if !defined(BUILD_TIMESTAMP)
38 #define BUILD_TIMESTAMP
41 #define DRIVER_VERSION "0.9.13-370"
42 #define DRIVER_MAJOR 0
43 #define DRIVER_MINOR 9
44 #define DRIVER_RELEASE 13
45 #define DRIVER_REVISION 370
47 #define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
48 #define DRIVER_NAME_SHORT "smartpqi"
50 MODULE_AUTHOR("Microsemi");
51 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
53 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
54 MODULE_VERSION(DRIVER_VERSION);
55 MODULE_LICENSE("GPL");
57 #define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
59 static char *hpe_branded_controller = "HPE Smart Array Controller";
60 static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
62 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
63 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
64 static void pqi_scan_start(struct Scsi_Host *shost);
65 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
66 struct pqi_queue_group *queue_group, enum pqi_io_path path,
67 struct pqi_io_request *io_request);
68 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
69 struct pqi_iu_header *request, unsigned int flags,
70 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
71 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
72 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
73 unsigned int cdb_length, struct pqi_queue_group *queue_group,
74 struct pqi_encryption_info *encryption_info);
76 /* for flags argument to pqi_submit_raid_request_synchronous() */
77 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
79 static struct scsi_transport_template *pqi_sas_transport_template;
81 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
83 static int pqi_disable_device_id_wildcards;
84 module_param_named(disable_device_id_wildcards,
85 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(disable_device_id_wildcards,
87 "Disable device ID wildcards.");
89 static char *raid_levels[] = {
99 static char *pqi_raid_level_to_string(u8 raid_level)
101 if (raid_level < ARRAY_SIZE(raid_levels))
102 return raid_levels[raid_level];
109 #define SA_RAID_1 2 /* also used for RAID 10 */
110 #define SA_RAID_5 3 /* also used for RAID 50 */
112 #define SA_RAID_6 5 /* also used for RAID 60 */
113 #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
114 #define SA_RAID_MAX SA_RAID_ADM
115 #define SA_RAID_UNKNOWN 0xff
117 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
119 scmd->scsi_done(scmd);
122 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
124 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
127 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
129 void *hostdata = shost_priv(shost);
131 return *((struct pqi_ctrl_info **)hostdata);
134 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
136 return !device->is_physical_device;
139 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
141 return !ctrl_info->controller_online;
144 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
146 if (ctrl_info->controller_online)
147 if (!sis_is_firmware_running(ctrl_info))
148 pqi_take_ctrl_offline(ctrl_info);
151 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
153 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
156 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
157 struct pqi_ctrl_info *ctrl_info)
159 return sis_read_driver_scratch(ctrl_info);
162 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
163 enum pqi_ctrl_mode mode)
165 sis_write_driver_scratch(ctrl_info, mode);
168 #define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
170 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
172 schedule_delayed_work(&ctrl_info->rescan_work,
173 PQI_RESCAN_WORK_INTERVAL);
176 static int pqi_map_single(struct pci_dev *pci_dev,
177 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
178 size_t buffer_length, int data_direction)
180 dma_addr_t bus_address;
182 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
185 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
187 if (pci_dma_mapping_error(pci_dev, bus_address))
190 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
191 put_unaligned_le32(buffer_length, &sg_descriptor->length);
192 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
197 static void pqi_pci_unmap(struct pci_dev *pci_dev,
198 struct pqi_sg_descriptor *descriptors, int num_descriptors,
203 if (data_direction == PCI_DMA_NONE)
206 for (i = 0; i < num_descriptors; i++)
207 pci_unmap_single(pci_dev,
208 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
209 get_unaligned_le32(&descriptors[i].length),
213 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
214 struct pqi_raid_path_request *request, u8 cmd,
215 u8 *scsi3addr, void *buffer, size_t buffer_length,
216 u16 vpd_page, int *pci_direction)
221 memset(request, 0, sizeof(*request));
223 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
224 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
225 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
226 &request->header.iu_length);
227 put_unaligned_le32(buffer_length, &request->buffer_length);
228 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
229 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
230 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
236 request->data_direction = SOP_READ_FLAG;
238 if (vpd_page & VPD_PAGE) {
240 cdb[2] = (u8)vpd_page;
242 cdb[4] = (u8)buffer_length;
244 case CISS_REPORT_LOG:
245 case CISS_REPORT_PHYS:
246 request->data_direction = SOP_READ_FLAG;
248 if (cmd == CISS_REPORT_PHYS)
249 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
251 cdb[1] = CISS_REPORT_LOG_EXTENDED;
252 put_unaligned_be32(buffer_length, &cdb[6]);
254 case CISS_GET_RAID_MAP:
255 request->data_direction = SOP_READ_FLAG;
257 cdb[1] = CISS_GET_RAID_MAP;
258 put_unaligned_be32(buffer_length, &cdb[6]);
261 request->data_direction = SOP_WRITE_FLAG;
263 cdb[6] = BMIC_CACHE_FLUSH;
264 put_unaligned_be16(buffer_length, &cdb[7]);
266 case BMIC_IDENTIFY_CONTROLLER:
267 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
268 request->data_direction = SOP_READ_FLAG;
271 put_unaligned_be16(buffer_length, &cdb[7]);
273 case BMIC_WRITE_HOST_WELLNESS:
274 request->data_direction = SOP_WRITE_FLAG;
277 put_unaligned_be16(buffer_length, &cdb[7]);
280 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
286 switch (request->data_direction) {
288 pci_dir = PCI_DMA_FROMDEVICE;
291 pci_dir = PCI_DMA_TODEVICE;
293 case SOP_NO_DIRECTION_FLAG:
294 pci_dir = PCI_DMA_NONE;
297 pci_dir = PCI_DMA_BIDIRECTIONAL;
301 *pci_direction = pci_dir;
303 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
304 buffer, buffer_length, pci_dir);
307 static struct pqi_io_request *pqi_alloc_io_request(
308 struct pqi_ctrl_info *ctrl_info)
310 struct pqi_io_request *io_request;
311 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
314 io_request = &ctrl_info->io_request_pool[i];
315 if (atomic_inc_return(&io_request->refcount) == 1)
317 atomic_dec(&io_request->refcount);
318 i = (i + 1) % ctrl_info->max_io_slots;
322 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
324 io_request->scmd = NULL;
325 io_request->status = 0;
326 io_request->error_info = NULL;
331 static void pqi_free_io_request(struct pqi_io_request *io_request)
333 atomic_dec(&io_request->refcount);
336 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
337 struct bmic_identify_controller *buffer)
341 struct pqi_raid_path_request request;
343 rc = pqi_build_raid_path_request(ctrl_info, &request,
344 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
345 sizeof(*buffer), 0, &pci_direction);
349 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
352 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
358 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
359 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
363 struct pqi_raid_path_request request;
365 rc = pqi_build_raid_path_request(ctrl_info, &request,
366 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
371 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
374 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
380 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
381 struct pqi_scsi_dev *device,
382 struct bmic_identify_physical_device *buffer,
383 size_t buffer_length)
387 u16 bmic_device_index;
388 struct pqi_raid_path_request request;
390 rc = pqi_build_raid_path_request(ctrl_info, &request,
391 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
392 buffer_length, 0, &pci_direction);
396 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
397 request.cdb[2] = (u8)bmic_device_index;
398 request.cdb[9] = (u8)(bmic_device_index >> 8);
400 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
401 0, NULL, NO_TIMEOUT);
403 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
409 #define SA_CACHE_FLUSH_BUFFER_LENGTH 4
411 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
414 struct pqi_raid_path_request request;
419 * Don't bother trying to flush the cache if the controller is locked up.
422 if (pqi_ctrl_offline(ctrl_info))
425 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
429 rc = pqi_build_raid_path_request(ctrl_info, &request,
430 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
431 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
435 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
436 0, NULL, NO_TIMEOUT);
438 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
447 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
448 void *buffer, size_t buffer_length)
451 struct pqi_raid_path_request request;
454 rc = pqi_build_raid_path_request(ctrl_info, &request,
455 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
456 buffer_length, 0, &pci_direction);
460 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
461 0, NULL, NO_TIMEOUT);
463 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
471 struct bmic_host_wellness_driver_version {
473 u8 driver_version_tag[2];
474 __le16 driver_version_length;
475 char driver_version[32];
476 u8 dont_write_tag[2];
482 static int pqi_write_driver_version_to_host_wellness(
483 struct pqi_ctrl_info *ctrl_info)
486 struct bmic_host_wellness_driver_version *buffer;
487 size_t buffer_length;
489 buffer_length = sizeof(*buffer);
491 buffer = kmalloc(buffer_length, GFP_KERNEL);
495 buffer->start_tag[0] = '<';
496 buffer->start_tag[1] = 'H';
497 buffer->start_tag[2] = 'W';
498 buffer->start_tag[3] = '>';
499 buffer->driver_version_tag[0] = 'D';
500 buffer->driver_version_tag[1] = 'V';
501 put_unaligned_le16(sizeof(buffer->driver_version),
502 &buffer->driver_version_length);
503 strncpy(buffer->driver_version, DRIVER_VERSION,
504 sizeof(buffer->driver_version) - 1);
505 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
506 buffer->dont_write_tag[0] = 'D';
507 buffer->dont_write_tag[1] = 'W';
508 buffer->end_tag[0] = 'Z';
509 buffer->end_tag[1] = 'Z';
511 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
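/*
 * Illustrative sketch (not part of the driver): the buffer written above is
 * assumed to look like the "<HW>" start tag, the "DV" tag, a little-endian
 * length of 32, the NUL-terminated driver version string, then the "DW"
 * (don't write) and "ZZ" end tags, matching how the fields are filled in
 * just above before being sent via BMIC_WRITE_HOST_WELLNESS.
 */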
520 struct bmic_host_wellness_time {
525 u8 dont_write_tag[2];
531 static int pqi_write_current_time_to_host_wellness(
532 struct pqi_ctrl_info *ctrl_info)
535 struct bmic_host_wellness_time *buffer;
536 size_t buffer_length;
541 buffer_length = sizeof(*buffer);
543 buffer = kmalloc(buffer_length, GFP_KERNEL);
547 buffer->start_tag[0] = '<';
548 buffer->start_tag[1] = 'H';
549 buffer->start_tag[2] = 'W';
550 buffer->start_tag[3] = '>';
551 buffer->time_tag[0] = 'T';
552 buffer->time_tag[1] = 'D';
553 put_unaligned_le16(sizeof(buffer->time),
554 &buffer->time_length);
556 local_time = ktime_get_real_seconds();
557 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
558 year = tm.tm_year + 1900;
560 buffer->time[0] = bin2bcd(tm.tm_hour);
561 buffer->time[1] = bin2bcd(tm.tm_min);
562 buffer->time[2] = bin2bcd(tm.tm_sec);
563 buffer->time[3] = 0;
564 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
565 buffer->time[5] = bin2bcd(tm.tm_mday);
566 buffer->time[6] = bin2bcd(year / 100);
567 buffer->time[7] = bin2bcd(year % 100);
569 buffer->dont_write_tag[0] = 'D';
570 buffer->dont_write_tag[1] = 'W';
571 buffer->end_tag[0] = 'Z';
572 buffer->end_tag[1] = 'Z';
574 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
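/*
 * Worked example (illustrative only): for 14:05:30 on 2016-08-09, bin2bcd()
 * produces time[] = { 0x14, 0x05, 0x30, 0x00, 0x08, 0x09, 0x20, 0x16 },
 * i.e. hour, minute, second, a zero byte, month, day, then the century (20)
 * and the year within the century (16) as separate BCD bytes.
 */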
581 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
583 static void pqi_update_time_worker(struct work_struct *work)
586 struct pqi_ctrl_info *ctrl_info;
588 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
591 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
593 dev_warn(&ctrl_info->pci_dev->dev,
594 "error updating time on controller\n");
596 schedule_delayed_work(&ctrl_info->update_time_work,
597 PQI_UPDATE_TIME_WORK_INTERVAL);
600 static inline void pqi_schedule_update_time_worker(
601 struct pqi_ctrl_info *ctrl_info)
603 schedule_delayed_work(&ctrl_info->update_time_work, 0);
606 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
607 void *buffer, size_t buffer_length)
611 struct pqi_raid_path_request request;
613 rc = pqi_build_raid_path_request(ctrl_info, &request,
614 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
618 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
621 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
627 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
631 size_t lun_list_length;
632 size_t lun_data_length;
633 size_t new_lun_list_length;
634 void *lun_data = NULL;
635 struct report_lun_header *report_lun_header;
637 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
638 if (!report_lun_header) {
643 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
644 sizeof(*report_lun_header));
648 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
651 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
653 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
659 if (lun_list_length == 0) {
660 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
664 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
668 new_lun_list_length = get_unaligned_be32(
669 &((struct report_lun_header *)lun_data)->list_length);
671 if (new_lun_list_length > lun_list_length) {
672 lun_list_length = new_lun_list_length;
678 kfree(report_lun_header);
690 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
693 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
697 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
700 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
703 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
704 struct report_phys_lun_extended **physdev_list,
705 struct report_log_lun_extended **logdev_list)
708 size_t logdev_list_length;
709 size_t logdev_data_length;
710 struct report_log_lun_extended *internal_logdev_list;
711 struct report_log_lun_extended *logdev_data;
712 struct report_lun_header report_lun_header;
714 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
716 dev_err(&ctrl_info->pci_dev->dev,
717 "report physical LUNs failed\n");
719 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
721 dev_err(&ctrl_info->pci_dev->dev,
722 "report logical LUNs failed\n");
725 * Tack the controller itself onto the end of the logical device list.
728 logdev_data = *logdev_list;
732 get_unaligned_be32(&logdev_data->header.list_length);
734 memset(&report_lun_header, 0, sizeof(report_lun_header));
736 (struct report_log_lun_extended *)&report_lun_header;
737 logdev_list_length = 0;
740 logdev_data_length = sizeof(struct report_lun_header) +
743 internal_logdev_list = kmalloc(logdev_data_length +
744 sizeof(struct report_log_lun_extended), GFP_KERNEL);
745 if (!internal_logdev_list) {
751 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
752 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
753 sizeof(struct report_log_lun_extended_entry));
754 put_unaligned_be32(logdev_list_length +
755 sizeof(struct report_log_lun_extended_entry),
756 &internal_logdev_list->header.list_length);
759 *logdev_list = internal_logdev_list;
764 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
765 int bus, int target, int lun)
768 device->target = target;
772 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
777 scsi3addr = device->scsi3addr;
778 lunid = get_unaligned_le32(scsi3addr);
780 if (pqi_is_hba_lunid(scsi3addr)) {
781 /* The specified device is the controller. */
782 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
783 device->target_lun_valid = true;
787 if (pqi_is_logical_device(device)) {
788 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
790 device->target_lun_valid = true;
795 * Defer target and LUN assignment for non-controller physical devices
796 * because the SAS transport layer will make these assignments later.
798 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
801 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
802 struct pqi_scsi_dev *device)
808 raid_level = SA_RAID_UNKNOWN;
810 buffer = kmalloc(64, GFP_KERNEL);
812 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
813 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
815 raid_level = buffer[8];
816 if (raid_level > SA_RAID_MAX)
817 raid_level = SA_RAID_UNKNOWN;
822 device->raid_level = raid_level;
825 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
826 struct pqi_scsi_dev *device, struct raid_map *raid_map)
830 u32 r5or6_blocks_per_row;
831 unsigned int num_phys_disks;
832 unsigned int num_raid_map_entries;
834 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
836 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
837 err_msg = "RAID map too small";
841 if (raid_map_size > sizeof(*raid_map)) {
842 err_msg = "RAID map too large";
846 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
847 (get_unaligned_le16(&raid_map->data_disks_per_row) +
848 get_unaligned_le16(&raid_map->metadata_disks_per_row));
849 num_raid_map_entries = num_phys_disks *
850 get_unaligned_le16(&raid_map->row_cnt);
852 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
853 err_msg = "invalid number of map entries in RAID map";
857 if (device->raid_level == SA_RAID_1) {
858 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
859 err_msg = "invalid RAID-1 map";
862 } else if (device->raid_level == SA_RAID_ADM) {
863 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
864 err_msg = "invalid RAID-1(ADM) map";
867 } else if ((device->raid_level == SA_RAID_5 ||
868 device->raid_level == SA_RAID_6) &&
869 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
871 r5or6_blocks_per_row =
872 get_unaligned_le16(&raid_map->strip_size) *
873 get_unaligned_le16(&raid_map->data_disks_per_row);
874 if (r5or6_blocks_per_row == 0) {
875 err_msg = "invalid RAID-5 or RAID-6 map";
883 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
888 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
889 struct pqi_scsi_dev *device)
893 struct pqi_raid_path_request request;
894 struct raid_map *raid_map;
896 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
900 rc = pqi_build_raid_path_request(ctrl_info, &request,
901 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
902 sizeof(*raid_map), 0, &pci_direction);
906 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
909 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
915 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
919 device->raid_map = raid_map;
929 static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
930 struct pqi_scsi_dev *device)
936 buffer = kmalloc(64, GFP_KERNEL);
940 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
941 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
945 #define OFFLOAD_STATUS_BYTE 4
946 #define OFFLOAD_CONFIGURED_BIT 0x1
947 #define OFFLOAD_ENABLED_BIT 0x2
949 offload_status = buffer[OFFLOAD_STATUS_BYTE];
950 device->offload_configured =
951 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
952 if (device->offload_configured) {
953 device->offload_enabled_pending =
954 !!(offload_status & OFFLOAD_ENABLED_BIT);
955 if (pqi_get_raid_map(ctrl_info, device))
956 device->offload_enabled_pending = false;
964 * Use vendor-specific VPD to determine online/offline status of a volume.
967 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
968 struct pqi_scsi_dev *device)
972 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
973 bool volume_offline = true;
975 struct ciss_vpd_logical_volume_status *vpd;
977 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
981 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
982 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
986 if (vpd->page_code != CISS_VPD_LV_STATUS)
989 page_length = offsetof(struct ciss_vpd_logical_volume_status,
990 volume_status) + vpd->page_length;
991 if (page_length < sizeof(*vpd))
994 volume_status = vpd->volume_status;
995 volume_flags = get_unaligned_be32(&vpd->flags);
996 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1001 device->volume_status = volume_status;
1002 device->volume_offline = volume_offline;
1005 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1006 struct pqi_scsi_dev *device)
1011 buffer = kmalloc(64, GFP_KERNEL);
1015 /* Send an inquiry to the device to see what it is. */
1016 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1020 scsi_sanitize_inquiry_string(&buffer[8], 8);
1021 scsi_sanitize_inquiry_string(&buffer[16], 16);
1023 device->devtype = buffer[0] & 0x1f;
1024 memcpy(device->vendor, &buffer[8],
1025 sizeof(device->vendor));
1026 memcpy(device->model, &buffer[16],
1027 sizeof(device->model));
1029 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1030 pqi_get_raid_level(ctrl_info, device);
1031 pqi_get_offload_status(ctrl_info, device);
1032 pqi_get_volume_status(ctrl_info, device);
1041 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1042 struct pqi_scsi_dev *device,
1043 struct bmic_identify_physical_device *id_phys)
1047 memset(id_phys, 0, sizeof(*id_phys));
1049 rc = pqi_identify_physical_device(ctrl_info, device,
1050 id_phys, sizeof(*id_phys));
1052 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1056 device->queue_depth =
1057 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1058 device->device_type = id_phys->device_type;
1059 device->active_path_index = id_phys->active_path_number;
1060 device->path_map = id_phys->redundant_path_present_map;
1061 memcpy(&device->box,
1062 &id_phys->alternate_paths_phys_box_on_port,
1063 sizeof(device->box));
1064 memcpy(&device->phys_connector,
1065 &id_phys->alternate_paths_phys_connector,
1066 sizeof(device->phys_connector));
1067 device->bay = id_phys->phys_bay_in_box;
1070 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1071 struct pqi_scsi_dev *device)
1074 static const char unknown_state_str[] =
1075 "Volume is in an unknown state (%u)";
1076 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1078 switch (device->volume_status) {
1080 status = "Volume online";
1082 case CISS_LV_FAILED:
1083 status = "Volume failed";
1085 case CISS_LV_NOT_CONFIGURED:
1086 status = "Volume not configured";
1088 case CISS_LV_DEGRADED:
1089 status = "Volume degraded";
1091 case CISS_LV_READY_FOR_RECOVERY:
1092 status = "Volume ready for recovery operation";
1094 case CISS_LV_UNDERGOING_RECOVERY:
1095 status = "Volume undergoing recovery";
1097 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1098 status = "Wrong physical drive was replaced";
1100 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1101 status = "A physical drive not properly connected";
1103 case CISS_LV_HARDWARE_OVERHEATING:
1104 status = "Hardware is overheating";
1106 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1107 status = "Hardware has overheated";
1109 case CISS_LV_UNDERGOING_EXPANSION:
1110 status = "Volume undergoing expansion";
1112 case CISS_LV_NOT_AVAILABLE:
1113 status = "Volume waiting for transforming volume";
1115 case CISS_LV_QUEUED_FOR_EXPANSION:
1116 status = "Volume queued for expansion";
1118 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1119 status = "Volume disabled due to SCSI ID conflict";
1121 case CISS_LV_EJECTED:
1122 status = "Volume has been ejected";
1124 case CISS_LV_UNDERGOING_ERASE:
1125 status = "Volume undergoing background erase";
1127 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1128 status = "Volume ready for predictive spare rebuild";
1130 case CISS_LV_UNDERGOING_RPI:
1131 status = "Volume undergoing rapid parity initialization";
1133 case CISS_LV_PENDING_RPI:
1134 status = "Volume queued for rapid parity initialization";
1136 case CISS_LV_ENCRYPTED_NO_KEY:
1137 status = "Encrypted volume inaccessible - key not present";
1139 case CISS_LV_UNDERGOING_ENCRYPTION:
1140 status = "Volume undergoing encryption process";
1142 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1143 status = "Volume undergoing encryption re-keying process";
1145 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1147 "Encrypted volume inaccessible - disabled on ctrl";
1149 case CISS_LV_PENDING_ENCRYPTION:
1150 status = "Volume pending migration to encrypted state";
1152 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1153 status = "Volume pending encryption rekeying";
1155 case CISS_LV_NOT_SUPPORTED:
1156 status = "Volume not supported on this controller";
1158 case CISS_LV_STATUS_UNAVAILABLE:
1159 status = "Volume status not available";
1162 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1163 unknown_state_str, device->volume_status);
1164 status = unknown_state_buffer;
1168 dev_info(&ctrl_info->pci_dev->dev,
1169 "scsi %d:%d:%d:%d %s\n",
1170 ctrl_info->scsi_host->host_no,
1171 device->bus, device->target, device->lun, status);
1174 static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1175 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1177 struct pqi_scsi_dev *device;
1179 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1180 scsi_device_list_entry) {
1181 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1183 if (pqi_is_logical_device(device))
1185 if (device->aio_handle == aio_handle)
1192 static void pqi_update_logical_drive_queue_depth(
1193 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1196 struct raid_map *raid_map;
1197 struct raid_map_disk_data *disk_data;
1198 struct pqi_scsi_dev *phys_disk;
1199 unsigned int num_phys_disks;
1200 unsigned int num_raid_map_entries;
1201 unsigned int queue_depth;
1203 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1205 raid_map = logical_drive->raid_map;
1209 disk_data = raid_map->disk_data;
1210 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1211 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1212 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1213 num_raid_map_entries = num_phys_disks *
1214 get_unaligned_le16(&raid_map->row_cnt);
1217 for (i = 0; i < num_raid_map_entries; i++) {
1218 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1219 disk_data[i].aio_handle);
1222 dev_warn(&ctrl_info->pci_dev->dev,
1223 "failed to find physical disk for logical drive %016llx\n",
1224 get_unaligned_be64(logical_drive->scsi3addr));
1225 logical_drive->offload_enabled = false;
1226 logical_drive->offload_enabled_pending = false;
1228 logical_drive->raid_map = NULL;
1232 queue_depth += phys_disk->queue_depth;
1235 logical_drive->queue_depth = queue_depth;
1238 static void pqi_update_all_logical_drive_queue_depths(
1239 struct pqi_ctrl_info *ctrl_info)
1241 struct pqi_scsi_dev *device;
1243 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1244 scsi_device_list_entry) {
1245 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1247 if (!pqi_is_logical_device(device))
1249 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1253 static void pqi_rescan_worker(struct work_struct *work)
1255 struct pqi_ctrl_info *ctrl_info;
1257 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1260 pqi_scan_scsi_devices(ctrl_info);
1263 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1264 struct pqi_scsi_dev *device)
1268 if (pqi_is_logical_device(device))
1269 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1270 device->target, device->lun);
1272 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1277 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1278 struct pqi_scsi_dev *device)
1280 if (pqi_is_logical_device(device))
1281 scsi_remove_device(device->sdev);
1283 pqi_remove_sas_device(device);
1286 /* Assumes the SCSI device list lock is held. */
1288 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1289 int bus, int target, int lun)
1291 struct pqi_scsi_dev *device;
1293 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1294 scsi_device_list_entry)
1295 if (device->bus == bus && device->target == target &&
1302 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1303 struct pqi_scsi_dev *dev2)
1305 if (dev1->is_physical_device != dev2->is_physical_device)
1308 if (dev1->is_physical_device)
1309 return dev1->wwid == dev2->wwid;
1311 return memcmp(dev1->volume_id, dev2->volume_id,
1312 sizeof(dev1->volume_id)) == 0;
1315 enum pqi_find_result {
1321 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1322 struct pqi_scsi_dev *device_to_find,
1323 struct pqi_scsi_dev **matching_device)
1325 struct pqi_scsi_dev *device;
1327 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1328 scsi_device_list_entry) {
1329 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1330 device->scsi3addr)) {
1331 *matching_device = device;
1332 if (pqi_device_equal(device_to_find, device)) {
1333 if (device_to_find->volume_offline)
1334 return DEVICE_CHANGED;
1337 return DEVICE_CHANGED;
1341 return DEVICE_NOT_FOUND;
1344 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1345 char *action, struct pqi_scsi_dev *device)
1347 dev_info(&ctrl_info->pci_dev->dev,
1348 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1350 ctrl_info->scsi_host->host_no,
1354 scsi_device_type(device->devtype),
1357 pqi_raid_level_to_string(device->raid_level),
1358 device->offload_configured ? '+' : '-',
1359 device->offload_enabled_pending ? '+' : '-',
1360 device->expose_device ? '+' : '-',
1361 device->queue_depth);
1364 /* Assumes the SCSI device list lock is held. */
1366 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1367 struct pqi_scsi_dev *new_device)
1369 existing_device->devtype = new_device->devtype;
1370 existing_device->device_type = new_device->device_type;
1371 existing_device->bus = new_device->bus;
1372 if (new_device->target_lun_valid) {
1373 existing_device->target = new_device->target;
1374 existing_device->lun = new_device->lun;
1375 existing_device->target_lun_valid = true;
1378 /* By definition, the scsi3addr and wwid fields are already the same. */
1380 existing_device->is_physical_device = new_device->is_physical_device;
1381 existing_device->expose_device = new_device->expose_device;
1382 existing_device->no_uld_attach = new_device->no_uld_attach;
1383 existing_device->aio_enabled = new_device->aio_enabled;
1384 memcpy(existing_device->vendor, new_device->vendor,
1385 sizeof(existing_device->vendor));
1386 memcpy(existing_device->model, new_device->model,
1387 sizeof(existing_device->model));
1388 existing_device->sas_address = new_device->sas_address;
1389 existing_device->raid_level = new_device->raid_level;
1390 existing_device->queue_depth = new_device->queue_depth;
1391 existing_device->aio_handle = new_device->aio_handle;
1392 existing_device->volume_status = new_device->volume_status;
1393 existing_device->active_path_index = new_device->active_path_index;
1394 existing_device->path_map = new_device->path_map;
1395 existing_device->bay = new_device->bay;
1396 memcpy(existing_device->box, new_device->box,
1397 sizeof(existing_device->box));
1398 memcpy(existing_device->phys_connector, new_device->phys_connector,
1399 sizeof(existing_device->phys_connector));
1400 existing_device->offload_configured = new_device->offload_configured;
1401 existing_device->offload_enabled = false;
1402 existing_device->offload_enabled_pending =
1403 new_device->offload_enabled_pending;
1404 existing_device->offload_to_mirror = 0;
1405 kfree(existing_device->raid_map);
1406 existing_device->raid_map = new_device->raid_map;
1408 /* To prevent this from being freed later. */
1409 new_device->raid_map = NULL;
1412 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1415 kfree(device->raid_map);
1421 * Called when exposing a new device to the OS fails in order to re-adjust
1422 * our internal SCSI device list to match the SCSI ML's view.
1425 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1426 struct pqi_scsi_dev *device)
1428 unsigned long flags;
1430 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1431 list_del(&device->scsi_device_list_entry);
1432 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1434 /* Allow the device structure to be freed later. */
1435 device->keep_device = false;
1438 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1439 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1443 unsigned long flags;
1444 enum pqi_find_result find_result;
1445 struct pqi_scsi_dev *device;
1446 struct pqi_scsi_dev *next;
1447 struct pqi_scsi_dev *matching_device;
1448 struct list_head add_list;
1449 struct list_head delete_list;
1451 INIT_LIST_HEAD(&add_list);
1452 INIT_LIST_HEAD(&delete_list);
1455 * The idea here is to do as little work as possible while holding the
1456 * spinlock. That's why we go to great pains to defer anything other
1457 * than updating the internal device list until after we release the spinlock.
1461 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1463 /* Assume that all devices in the existing list have gone away. */
1464 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1465 scsi_device_list_entry)
1466 device->device_gone = true;
1468 for (i = 0; i < num_new_devices; i++) {
1469 device = new_device_list[i];
1471 find_result = pqi_scsi_find_entry(ctrl_info, device,
1474 switch (find_result) {
1477 * The newly found device is already in the existing device list.
1480 device->new_device = false;
1481 matching_device->device_gone = false;
1482 pqi_scsi_update_device(matching_device, device);
1484 case DEVICE_NOT_FOUND:
1486 * The newly found device is NOT in the existing device list.
1489 device->new_device = true;
1491 case DEVICE_CHANGED:
1493 * The original device has gone away and we need to add the new device.
1496 device->new_device = true;
1499 WARN_ON(find_result);
1504 /* Process all devices that have gone away. */
1505 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1506 scsi_device_list_entry) {
1507 if (device->device_gone) {
1508 list_del(&device->scsi_device_list_entry);
1509 list_add_tail(&device->delete_list_entry, &delete_list);
1513 /* Process all new devices. */
1514 for (i = 0; i < num_new_devices; i++) {
1515 device = new_device_list[i];
1516 if (!device->new_device)
1518 if (device->volume_offline)
1520 list_add_tail(&device->scsi_device_list_entry,
1521 &ctrl_info->scsi_device_list);
1522 list_add_tail(&device->add_list_entry, &add_list);
1523 /* To prevent this device structure from being freed later. */
1524 device->keep_device = true;
1527 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1529 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1530 scsi_device_list_entry)
1531 device->offload_enabled =
1532 device->offload_enabled_pending;
1534 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1536 /* Remove all devices that have gone away. */
1537 list_for_each_entry_safe(device, next, &delete_list,
1538 delete_list_entry) {
1540 pqi_remove_device(ctrl_info, device);
1541 if (device->volume_offline) {
1542 pqi_dev_info(ctrl_info, "offline", device);
1543 pqi_show_volume_status(ctrl_info, device);
1545 pqi_dev_info(ctrl_info, "removed", device);
1547 list_del(&device->delete_list_entry);
1548 pqi_free_device(device);
1552 * Notify the SCSI ML if the queue depth of any existing device has changed.
1555 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1556 scsi_device_list_entry) {
1557 if (device->sdev && device->queue_depth !=
1558 device->advertised_queue_depth) {
1559 device->advertised_queue_depth = device->queue_depth;
1560 scsi_change_queue_depth(device->sdev,
1561 device->advertised_queue_depth);
1565 /* Expose any new devices. */
1566 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1567 if (device->expose_device && !device->sdev) {
1568 rc = pqi_add_device(ctrl_info, device);
1570 dev_warn(&ctrl_info->pci_dev->dev,
1571 "scsi %d:%d:%d:%d addition failed, device not added\n",
1572 ctrl_info->scsi_host->host_no,
1573 device->bus, device->target,
1575 pqi_fixup_botched_add(ctrl_info, device);
1579 pqi_dev_info(ctrl_info, "added", device);
1583 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1585 bool is_supported = false;
1587 switch (device->devtype) {
1591 case TYPE_MEDIUM_CHANGER:
1592 case TYPE_ENCLOSURE:
1593 is_supported = true;
1597 * Only support the HBA controller itself as a RAID
1598 * controller. If it's a RAID controller other than
1599 * the HBA itself (an external RAID controller, MSA500
1600 * or similar), we don't support it.
1602 if (pqi_is_hba_lunid(device->scsi3addr))
1603 is_supported = true;
1607 return is_supported;
1610 static inline bool pqi_skip_device(u8 *scsi3addr,
1611 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1615 if (!MASKED_DEVICE(scsi3addr))
1618 /* The device is masked. */
1620 device_flags = phys_lun_ext_entry->device_flags;
1622 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1624 * It's a non-disk device. We ignore all devices of this type
1625 * when they're masked.
1633 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1635 /* Expose all devices except for physical devices that are masked. */
1636 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1642 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1646 struct list_head new_device_list_head;
1647 struct report_phys_lun_extended *physdev_list = NULL;
1648 struct report_log_lun_extended *logdev_list = NULL;
1649 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1650 struct report_log_lun_extended_entry *log_lun_ext_entry;
1651 struct bmic_identify_physical_device *id_phys = NULL;
1654 struct pqi_scsi_dev **new_device_list = NULL;
1655 struct pqi_scsi_dev *device;
1656 struct pqi_scsi_dev *next;
1657 unsigned int num_new_devices;
1658 unsigned int num_valid_devices;
1659 bool is_physical_device;
1661 static char *out_of_memory_msg =
1662 "out of memory, device discovery stopped";
1664 INIT_LIST_HEAD(&new_device_list_head);
1666 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1672 get_unaligned_be32(&physdev_list->header.list_length)
1673 / sizeof(physdev_list->lun_entries[0]);
1679 get_unaligned_be32(&logdev_list->header.list_length)
1680 / sizeof(logdev_list->lun_entries[0]);
1684 if (num_physicals) {
1686 * We need this buffer for calls to pqi_get_physical_disk_info()
1687 * below. We allocate it here instead of inside
1688 * pqi_get_physical_disk_info() because it's a fairly large buffer.
1691 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1693 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1700 num_new_devices = num_physicals + num_logicals;
1702 new_device_list = kmalloc(sizeof(*new_device_list) *
1703 num_new_devices, GFP_KERNEL);
1704 if (!new_device_list) {
1705 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1710 for (i = 0; i < num_new_devices; i++) {
1711 device = kzalloc(sizeof(*device), GFP_KERNEL);
1713 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1718 list_add_tail(&device->new_device_list_entry,
1719 &new_device_list_head);
1723 num_valid_devices = 0;
1725 for (i = 0; i < num_new_devices; i++) {
1727 if (i < num_physicals) {
1728 is_physical_device = true;
1729 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1730 log_lun_ext_entry = NULL;
1731 scsi3addr = phys_lun_ext_entry->lunid;
1733 is_physical_device = false;
1734 phys_lun_ext_entry = NULL;
1736 &logdev_list->lun_entries[i - num_physicals];
1737 scsi3addr = log_lun_ext_entry->lunid;
1740 if (is_physical_device &&
1741 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1745 device = list_next_entry(device, new_device_list_entry);
1747 device = list_first_entry(&new_device_list_head,
1748 struct pqi_scsi_dev, new_device_list_entry);
1750 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1751 device->is_physical_device = is_physical_device;
1752 device->raid_level = SA_RAID_UNKNOWN;
1754 /* Gather information about the device. */
1755 rc = pqi_get_device_info(ctrl_info, device);
1756 if (rc == -ENOMEM) {
1757 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1762 dev_warn(&ctrl_info->pci_dev->dev,
1763 "obtaining device info failed, skipping device %016llx\n",
1764 get_unaligned_be64(device->scsi3addr));
1769 if (!pqi_is_supported_device(device))
1772 pqi_assign_bus_target_lun(device);
1774 device->expose_device = pqi_expose_device(device);
1776 if (device->is_physical_device) {
1777 device->wwid = phys_lun_ext_entry->wwid;
1778 if ((phys_lun_ext_entry->device_flags &
1779 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1780 phys_lun_ext_entry->aio_handle)
1781 device->aio_enabled = true;
1783 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1784 sizeof(device->volume_id));
1787 switch (device->devtype) {
1790 case TYPE_ENCLOSURE:
1791 if (device->is_physical_device) {
1792 device->sas_address =
1793 get_unaligned_be64(&device->wwid);
1794 if (device->devtype == TYPE_DISK ||
1795 device->devtype == TYPE_ZBC) {
1796 device->aio_handle =
1797 phys_lun_ext_entry->aio_handle;
1798 pqi_get_physical_disk_info(ctrl_info,
1805 new_device_list[num_valid_devices++] = device;
1808 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1811 list_for_each_entry_safe(device, next, &new_device_list_head,
1812 new_device_list_entry) {
1813 if (device->keep_device)
1815 list_del(&device->new_device_list_entry);
1816 pqi_free_device(device);
1819 kfree(new_device_list);
1820 kfree(physdev_list);
1827 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1829 unsigned long flags;
1830 struct pqi_scsi_dev *device;
1831 struct pqi_scsi_dev *next;
1833 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1835 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1836 scsi_device_list_entry) {
1838 pqi_remove_device(ctrl_info, device);
1839 list_del(&device->scsi_device_list_entry);
1840 pqi_free_device(device);
1843 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1846 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1850 if (pqi_ctrl_offline(ctrl_info))
1853 mutex_lock(&ctrl_info->scan_mutex);
1855 rc = pqi_update_scsi_devices(ctrl_info);
1857 pqi_schedule_rescan_worker(ctrl_info);
1859 mutex_unlock(&ctrl_info->scan_mutex);
1864 static void pqi_scan_start(struct Scsi_Host *shost)
1866 pqi_scan_scsi_devices(shost_to_hba(shost));
1869 /* Returns TRUE if scan is finished. */
1871 static int pqi_scan_finished(struct Scsi_Host *shost,
1872 unsigned long elapsed_time)
1874 struct pqi_ctrl_info *ctrl_info;
1876 ctrl_info = shost_priv(shost);
1878 return !mutex_is_locked(&ctrl_info->scan_mutex);
1881 static inline void pqi_set_encryption_info(
1882 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1885 u32 volume_blk_size;
1888 * Set the encryption tweak values based on logical block address.
1889 * If the block size is 512, the tweak value is equal to the LBA.
1890 * For other block sizes, tweak value is (LBA * block size) / 512.
1892 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1893 if (volume_blk_size != 512)
1894 first_block = (first_block * volume_blk_size) / 512;
1896 encryption_info->data_encryption_key_index =
1897 get_unaligned_le16(&raid_map->data_encryption_key_index);
1898 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1899 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
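/*
 * Illustrative example (not part of the driver): for a volume with 4096-byte
 * blocks, an I/O starting at LBA 100 gets a tweak of (100 * 4096) / 512 = 800,
 * so encrypt_tweak_lower = 800 and encrypt_tweak_upper = 0; for a 512-byte
 * block volume the tweak is simply the starting LBA itself.
 */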
1903 * Attempt to perform offload RAID mapping for a logical volume I/O.
1906 #define PQI_RAID_BYPASS_INELIGIBLE 1
1908 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1909 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1910 struct pqi_queue_group *queue_group)
1912 struct raid_map *raid_map;
1913 bool is_write = false;
1921 u32 first_row_offset;
1922 u32 last_row_offset;
1927 u32 r5or6_blocks_per_row;
1928 u64 r5or6_first_row;
1930 u32 r5or6_first_row_offset;
1931 u32 r5or6_last_row_offset;
1932 u32 r5or6_first_column;
1933 u32 r5or6_last_column;
1934 u16 data_disks_per_row;
1935 u32 total_disks_per_row;
1936 u16 layout_map_count;
1948 int offload_to_mirror;
1949 struct pqi_encryption_info *encryption_info_ptr;
1950 struct pqi_encryption_info encryption_info;
1951 #if BITS_PER_LONG == 32
1955 /* Check for valid opcode, get LBA and block count. */
1956 switch (scmd->cmnd[0]) {
1961 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
1962 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
1963 block_cnt = (u32)scmd->cmnd[4];
1971 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1972 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1978 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1979 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1985 first_block = get_unaligned_be64(&scmd->cmnd[2]);
1986 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1989 /* Process via normal I/O path. */
1990 return PQI_RAID_BYPASS_INELIGIBLE;
1993 /* Check for write to non-RAID-0. */
1994 if (is_write && device->raid_level != SA_RAID_0)
1995 return PQI_RAID_BYPASS_INELIGIBLE;
1997 if (unlikely(block_cnt == 0))
1998 return PQI_RAID_BYPASS_INELIGIBLE;
2000 last_block = first_block + block_cnt - 1;
2001 raid_map = device->raid_map;
2003 /* Check for invalid block or wraparound. */
2004 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2005 last_block < first_block)
2006 return PQI_RAID_BYPASS_INELIGIBLE;
2008 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2009 strip_size = get_unaligned_le16(&raid_map->strip_size);
2010 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2012 /* Calculate stripe information for the request. */
2013 blocks_per_row = data_disks_per_row * strip_size;
2014 #if BITS_PER_LONG == 32
2015 tmpdiv = first_block;
2016 do_div(tmpdiv, blocks_per_row);
2018 tmpdiv = last_block;
2019 do_div(tmpdiv, blocks_per_row);
2021 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2022 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2023 tmpdiv = first_row_offset;
2024 do_div(tmpdiv, strip_size);
2025 first_column = tmpdiv;
2026 tmpdiv = last_row_offset;
2027 do_div(tmpdiv, strip_size);
2028 last_column = tmpdiv;
2030 first_row = first_block / blocks_per_row;
2031 last_row = last_block / blocks_per_row;
2032 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2033 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2034 first_column = first_row_offset / strip_size;
2035 last_column = last_row_offset / strip_size;
2038 /* If this isn't a single row/column then give to the controller. */
2039 if (first_row != last_row || first_column != last_column)
2040 return PQI_RAID_BYPASS_INELIGIBLE;
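/*
 * Worked example with illustrative numbers: strip_size = 128 blocks and
 * data_disks_per_row = 3 give blocks_per_row = 384.  A request with
 * first_block = 1000 and block_cnt = 8 lands in row 2 (row offset 232) for
 * both its first and last block, and in column 232 / 128 = 1, so it touches
 * a single disk and stays eligible for the bypass path.
 */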
2042 /* Proceeding with driver mapping. */
2043 total_disks_per_row = data_disks_per_row +
2044 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2045 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2046 get_unaligned_le16(&raid_map->row_cnt);
2047 map_index = (map_row * total_disks_per_row) + first_column;
2050 if (device->raid_level == SA_RAID_1) {
2051 if (device->offload_to_mirror)
2052 map_index += data_disks_per_row;
2053 device->offload_to_mirror = !device->offload_to_mirror;
2054 } else if (device->raid_level == SA_RAID_ADM) {
2057 * Handles N-way mirrors (R1-ADM) and R10 with # of drives divisible by 3.
2060 offload_to_mirror = device->offload_to_mirror;
2061 if (offload_to_mirror == 0) {
2062 /* use physical disk in the first mirrored group. */
2063 map_index %= data_disks_per_row;
2067 * Determine mirror group that map_index indicates.
2070 current_group = map_index / data_disks_per_row;
2072 if (offload_to_mirror != current_group) {
2074 layout_map_count - 1) {
2076 * Select raid index from next group.
2079 map_index += data_disks_per_row;
2083 * Select raid index from first group.
2086 map_index %= data_disks_per_row;
2090 } while (offload_to_mirror != current_group);
2093 /* Set mirror group to use next time. */
2095 (offload_to_mirror >= layout_map_count - 1) ?
2096 0 : offload_to_mirror + 1;
2097 WARN_ON(offload_to_mirror >= layout_map_count);
2098 device->offload_to_mirror = offload_to_mirror;
2100 * Avoid direct use of device->offload_to_mirror within this
2101 * function since multiple threads might simultaneously
2102 * increment it beyond the range of device->layout_map_count -1.
2104 } else if ((device->raid_level == SA_RAID_5 ||
2105 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2107 /* Verify first and last block are in same RAID group */
2108 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2109 stripesize = r5or6_blocks_per_row * layout_map_count;
2110 #if BITS_PER_LONG == 32
2111 tmpdiv = first_block;
2112 first_group = do_div(tmpdiv, stripesize);
2113 tmpdiv = first_group;
2114 do_div(tmpdiv, r5or6_blocks_per_row);
2115 first_group = tmpdiv;
2116 tmpdiv = last_block;
2117 last_group = do_div(tmpdiv, stripesize);
2118 tmpdiv = last_group;
2119 do_div(tmpdiv, r5or6_blocks_per_row);
2120 last_group = tmpdiv;
2122 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2123 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2125 if (first_group != last_group)
2126 return PQI_RAID_BYPASS_INELIGIBLE;
2128 /* Verify request is in a single row of RAID 5/6 */
2129 #if BITS_PER_LONG == 32
2130 tmpdiv = first_block;
2131 do_div(tmpdiv, stripesize);
2132 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2133 tmpdiv = last_block;
2134 do_div(tmpdiv, stripesize);
2135 r5or6_last_row = r0_last_row = tmpdiv;
2137 first_row = r5or6_first_row = r0_first_row =
2138 first_block / stripesize;
2139 r5or6_last_row = r0_last_row = last_block / stripesize;
2141 if (r5or6_first_row != r5or6_last_row)
2142 return PQI_RAID_BYPASS_INELIGIBLE;
2144 /* Verify request is in a single column */
2145 #if BITS_PER_LONG == 32
2146 tmpdiv = first_block;
2147 first_row_offset = do_div(tmpdiv, stripesize);
2148 tmpdiv = first_row_offset;
2149 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2150 r5or6_first_row_offset = first_row_offset;
2151 tmpdiv = last_block;
2152 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2153 tmpdiv = r5or6_last_row_offset;
2154 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2155 tmpdiv = r5or6_first_row_offset;
2156 do_div(tmpdiv, strip_size);
2157 first_column = r5or6_first_column = tmpdiv;
2158 tmpdiv = r5or6_last_row_offset;
2159 do_div(tmpdiv, strip_size);
2160 r5or6_last_column = tmpdiv;
2162 first_row_offset = r5or6_first_row_offset =
2163 (u32)((first_block % stripesize) %
2164 r5or6_blocks_per_row);
2166 r5or6_last_row_offset =
2167 (u32)((last_block % stripesize) %
2168 r5or6_blocks_per_row);
2170 first_column = r5or6_first_row_offset / strip_size;
2171 r5or6_first_column = first_column;
2172 r5or6_last_column = r5or6_last_row_offset / strip_size;
2174 if (r5or6_first_column != r5or6_last_column)
2175 return PQI_RAID_BYPASS_INELIGIBLE;
2177 /* Request is eligible */
2179 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2180 get_unaligned_le16(&raid_map->row_cnt);
2182 map_index = (first_group *
2183 (get_unaligned_le16(&raid_map->row_cnt) *
2184 total_disks_per_row)) +
2185 (map_row * total_disks_per_row) + first_column;
2188 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2189 return PQI_RAID_BYPASS_INELIGIBLE;
2191 aio_handle = raid_map->disk_data[map_index].aio_handle;
2192 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2193 first_row * strip_size +
2194 (first_row_offset - first_column * strip_size);
2195 disk_block_cnt = block_cnt;
2197 /* Handle differing logical/physical block sizes. */
2198 if (raid_map->phys_blk_shift) {
2199 disk_block <<= raid_map->phys_blk_shift;
2200 disk_block_cnt <<= raid_map->phys_blk_shift;
2203 if (unlikely(disk_block_cnt > 0xffff))
2204 return PQI_RAID_BYPASS_INELIGIBLE;
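/*
 * Illustrative example: with phys_blk_shift == 3 (e.g. 4096-byte volume
 * blocks on a 512-byte physical device), each volume block covers 8 physical
 * blocks, so a disk_block of 100 shifts to 800 and the block count is scaled
 * the same way.  Counts above 0xffff are rejected by the check above and the
 * request is sent down the normal RAID path instead.
 */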
2206 /* Build the new CDB for the physical disk I/O. */
2207 if (disk_block > 0xffffffff) {
2208 cdb[0] = is_write ? WRITE_16 : READ_16;
2210 put_unaligned_be64(disk_block, &cdb[2]);
2211 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2216 cdb[0] = is_write ? WRITE_10 : READ_10;
2218 put_unaligned_be32((u32)disk_block, &cdb[2]);
2220 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2225 if (get_unaligned_le16(&raid_map->flags) &
2226 RAID_MAP_ENCRYPTION_ENABLED) {
2227 pqi_set_encryption_info(&encryption_info, raid_map,
2229 encryption_info_ptr = &encryption_info;
2231 encryption_info_ptr = NULL;
2234 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2235 cdb, cdb_length, queue_group, encryption_info_ptr);
2238 #define PQI_STATUS_IDLE 0x0
2240 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2241 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2243 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2244 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2245 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2246 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2247 #define PQI_DEVICE_STATE_ERROR 0x4
2249 #define PQI_MODE_READY_TIMEOUT_SECS 30
2250 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2252 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2254 struct pqi_device_registers __iomem *pqi_registers;
2255 unsigned long timeout;
2259 pqi_registers = ctrl_info->pqi_registers;
2260 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2263 signature = readq(&pqi_registers->signature);
2264 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2265 sizeof(signature)) == 0)
2267 if (time_after(jiffies, timeout)) {
2268 dev_err(&ctrl_info->pci_dev->dev,
2269 "timed out waiting for PQI signature\n");
2272 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2276 status = readb(&pqi_registers->function_and_status_code);
2277 if (status == PQI_STATUS_IDLE)
2279 if (time_after(jiffies, timeout)) {
2280 dev_err(&ctrl_info->pci_dev->dev,
2281 "timed out waiting for PQI IDLE\n");
2284 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2288 if (readl(&pqi_registers->device_status) ==
2289 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2291 if (time_after(jiffies, timeout)) {
2292 dev_err(&ctrl_info->pci_dev->dev,
2293 "timed out waiting for PQI all registers ready\n");
2296 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2302 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2304 struct pqi_scsi_dev *device;
2306 device = io_request->scmd->device->hostdata;
2307 device->offload_enabled = false;
2310 static inline void pqi_take_device_offline(struct scsi_device *sdev)
2312 struct pqi_ctrl_info *ctrl_info;
2313 struct pqi_scsi_dev *device;
2315 if (scsi_device_online(sdev)) {
2316 scsi_device_set_state(sdev, SDEV_OFFLINE);
2317 ctrl_info = shost_to_hba(sdev->host);
2318 schedule_delayed_work(&ctrl_info->rescan_work, 0);
2319 device = sdev->hostdata;
2320 dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2321 ctrl_info->scsi_host->host_no, device->bus,
2322 device->target, device->lun);
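/*
 * The two error handlers below translate controller-supplied error buffers
 * into SCSI midlayer status: residual counts for underruns, sense data
 * copied into scmd->sense_buffer (truncated to SCSI_SENSE_BUFFERSIZE), and
 * a host byte of DID_NO_CONNECT when the sense data indicates the device
 * has been lost.
 */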
2326 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2330 struct scsi_cmnd *scmd;
2331 struct pqi_raid_error_info *error_info;
2332 size_t sense_data_length;
2335 struct scsi_sense_hdr sshdr;
2337 scmd = io_request->scmd;
2341 error_info = io_request->error_info;
2342 scsi_status = error_info->status;
2345 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2346 xfer_count =
2347 get_unaligned_le32(&error_info->data_out_transferred);
2348 residual_count = scsi_bufflen(scmd) - xfer_count;
2349 scsi_set_resid(scmd, residual_count);
2350 if (xfer_count < scmd->underflow)
2351 host_byte = DID_SOFT_ERROR;
2354 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2355 if (sense_data_length == 0)
2356 sense_data_length =
2357 get_unaligned_le16(&error_info->response_data_length);
2358 if (sense_data_length) {
2359 if (sense_data_length > sizeof(error_info->data))
2360 sense_data_length = sizeof(error_info->data);
2362 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2363 scsi_normalize_sense(error_info->data,
2364 sense_data_length, &sshdr) &&
2365 sshdr.sense_key == HARDWARE_ERROR &&
2366 sshdr.asc == 0x3e &&
2367 sshdr.ascq == 0x1) {
2368 pqi_take_device_offline(scmd->device);
2369 host_byte = DID_NO_CONNECT;
2372 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2373 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2374 memcpy(scmd->sense_buffer, error_info->data,
2375 sense_data_length);
2378 scmd->result = scsi_status;
2379 set_host_byte(scmd, host_byte);
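/*
 * AIO (bypass) path errors carry a service response plus a status code.
 * An AIO-path-disabled status is not reported to the midlayer as an error;
 * instead the request is flagged -EAGAIN so the completion handler can
 * retry it, and further bypass attempts for the device are disabled via
 * pqi_aio_path_disabled().
 */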
2382 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2386 struct scsi_cmnd *scmd;
2387 struct pqi_aio_error_info *error_info;
2388 size_t sense_data_length;
2391 bool device_offline;
2393 scmd = io_request->scmd;
2394 error_info = io_request->error_info;
2396 sense_data_length = 0;
2397 device_offline = false;
2399 switch (error_info->service_response) {
2400 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2401 scsi_status = error_info->status;
2403 case PQI_AIO_SERV_RESPONSE_FAILURE:
2404 switch (error_info->status) {
2405 case PQI_AIO_STATUS_IO_ABORTED:
2406 scsi_status = SAM_STAT_TASK_ABORTED;
2408 case PQI_AIO_STATUS_UNDERRUN:
2409 scsi_status = SAM_STAT_GOOD;
2410 residual_count = get_unaligned_le32(
2411 &error_info->residual_count);
2412 scsi_set_resid(scmd, residual_count);
2413 xfer_count = scsi_bufflen(scmd) - residual_count;
2414 if (xfer_count < scmd->underflow)
2415 host_byte = DID_SOFT_ERROR;
2417 case PQI_AIO_STATUS_OVERRUN:
2418 scsi_status = SAM_STAT_GOOD;
2420 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2421 pqi_aio_path_disabled(io_request);
2422 scsi_status = SAM_STAT_GOOD;
2423 io_request->status = -EAGAIN;
2425 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2426 case PQI_AIO_STATUS_INVALID_DEVICE:
2427 device_offline = true;
2428 pqi_take_device_offline(scmd->device);
2429 host_byte = DID_NO_CONNECT;
2430 scsi_status = SAM_STAT_CHECK_CONDITION;
2432 case PQI_AIO_STATUS_IO_ERROR:
2434 scsi_status = SAM_STAT_CHECK_CONDITION;
2438 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2439 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2440 scsi_status = SAM_STAT_GOOD;
2442 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2443 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2445 scsi_status = SAM_STAT_CHECK_CONDITION;
2449 if (error_info->data_present) {
2450 sense_data_length =
2451 get_unaligned_le16(&error_info->data_length);
2452 if (sense_data_length) {
2453 if (sense_data_length > sizeof(error_info->data))
2454 sense_data_length = sizeof(error_info->data);
2455 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2456 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2457 memcpy(scmd->sense_buffer, error_info->data,
2458 sense_data_length);
2462 if (device_offline && sense_data_length == 0)
2463 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2464 0x3e, 0x1);
2466 scmd->result = scsi_status;
2467 set_host_byte(scmd, host_byte);
2470 static void pqi_process_io_error(unsigned int iu_type,
2471 struct pqi_io_request *io_request)
2474 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2475 pqi_process_raid_io_error(io_request);
2477 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2478 pqi_process_aio_io_error(io_request);
2483 static int pqi_interpret_task_management_response(
2484 struct pqi_task_management_response *response)
2488 switch (response->response_code) {
2489 case SOP_TMF_COMPLETE:
2490 case SOP_TMF_FUNCTION_SUCCEEDED:
2501 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2502 struct pqi_queue_group *queue_group)
2504 unsigned int num_responses;
2507 struct pqi_io_request *io_request;
2508 struct pqi_io_response *response;
2512 oq_ci = queue_group->oq_ci_copy;
2515 oq_pi = *queue_group->oq_pi;
2516 if (oq_pi == oq_ci)
2517 break;
2519 num_responses++;
2520 response = queue_group->oq_element_array +
2521 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2523 request_id = get_unaligned_le16(&response->request_id);
2524 WARN_ON(request_id >= ctrl_info->max_io_slots);
2526 io_request = &ctrl_info->io_request_pool[request_id];
2527 WARN_ON(atomic_read(&io_request->refcount) == 0);
2529 switch (response->header.iu_type) {
2530 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2531 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2532 if (io_request->scmd)
2533 io_request->scmd->result = 0;
2535 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2537 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2538 io_request->status =
2539 pqi_interpret_task_management_response(
2540 (void *)response);
2541 break;
2542 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2543 pqi_aio_path_disabled(io_request);
2544 io_request->status = -EAGAIN;
2546 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2547 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2548 io_request->error_info = ctrl_info->error_buffer +
2549 (get_unaligned_le16(&response->error_index) *
2550 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2551 pqi_process_io_error(response->header.iu_type,
2552 io_request);
2553 break;
2554 default:
2555 dev_err(&ctrl_info->pci_dev->dev,
2556 "unexpected IU type: 0x%x\n",
2557 response->header.iu_type);
2558 WARN_ON(response->header.iu_type);
2562 io_request->io_complete_callback(io_request,
2563 io_request->context);
2566 * Note that the I/O request structure CANNOT BE TOUCHED after
2567 * returning from the I/O completion callback!
2570 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2573 if (num_responses) {
2574 queue_group->oq_ci_copy = oq_ci;
2575 writel(oq_ci, queue_group->oq_ci);
2578 return num_responses;
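/*
 * Free space in a circular queue, leaving one element unused so that a
 * full queue can be distinguished from an empty one.  For example, with
 * 16 elements, pi == 7 and ci == 3: 4 elements are in use and 11 are free.
 */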
2581 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2582 unsigned int ci, unsigned int elements_in_queue)
2584 unsigned int num_elements_used;
2586 if (pi >= ci)
2587 num_elements_used = pi - ci;
2588 else
2589 num_elements_used = elements_in_queue - ci + pi;
2591 return elements_in_queue - num_elements_used - 1;
2594 #define PQI_EVENT_ACK_TIMEOUT 30
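/*
 * Event acknowledgements are posted directly to the RAID inbound queue of
 * the default queue group.  If no element is free, the submit lock is
 * released and the check is retried until the 30-second timeout above
 * expires.
 */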
2596 static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2597 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2601 unsigned long flags;
2603 unsigned long timeout;
2604 struct pqi_queue_group *queue_group;
2606 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2607 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2609 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2612 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2614 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2615 iq_ci = *queue_group->iq_ci[RAID_PATH];
2617 if (pqi_num_elements_free(iq_pi, iq_ci,
2618 ctrl_info->num_elements_per_iq))
2619 break;
2621 spin_unlock_irqrestore(
2622 &queue_group->submit_lock[RAID_PATH], flags);
2624 if (time_after(jiffies, timeout)) {
2625 dev_err(&ctrl_info->pci_dev->dev,
2626 "sending event acknowledge timed out\n");
2631 next_element = queue_group->iq_element_array[RAID_PATH] +
2632 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2634 memcpy(next_element, iu, iu_length);
2636 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2638 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2641 * This write notifies the controller that an IU is available to be
2644 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2646 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2649 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2650 struct pqi_event *event)
2652 struct pqi_event_acknowledge_request request;
2654 memset(&request, 0, sizeof(request));
2656 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2657 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2658 &request.header.iu_length);
2659 request.event_type = event->event_type;
2660 request.event_id = event->event_id;
2661 request.additional_event_id = event->additional_event_id;
2663 pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2666 static void pqi_event_worker(struct work_struct *work)
2669 struct pqi_ctrl_info *ctrl_info;
2670 struct pqi_event *pending_event;
2671 bool got_non_heartbeat_event = false;
2673 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2675 pending_event = ctrl_info->pending_events;
2676 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2677 if (pending_event->pending) {
2678 pending_event->pending = false;
2679 pqi_acknowledge_event(ctrl_info, pending_event);
2680 if (i != PQI_EVENT_HEARTBEAT)
2681 got_non_heartbeat_event = true;
2682 }
2683 pending_event++;
2684 }
2686 if (got_non_heartbeat_event)
2687 pqi_schedule_rescan_worker(ctrl_info);
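/*
 * Once a controller is marked offline (controller_online = false), every
 * request still sitting on a queue group's software submit list is
 * completed back to the SCSI midlayer with DID_NO_CONNECT.
 */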
2690 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2694 struct pqi_queue_group *queue_group;
2695 unsigned long flags;
2696 struct pqi_io_request *io_request;
2697 struct pqi_io_request *next;
2698 struct scsi_cmnd *scmd;
2700 ctrl_info->controller_online = false;
2701 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2703 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2704 queue_group = &ctrl_info->queue_groups[i];
2706 for (path = 0; path < 2; path++) {
2707 spin_lock_irqsave(
2708 &queue_group->submit_lock[path], flags);
2710 list_for_each_entry_safe(io_request, next,
2711 &queue_group->request_list[path],
2712 request_list_entry) {
2714 scmd = io_request->scmd;
2715 if (scmd) {
2716 set_host_byte(scmd, DID_NO_CONNECT);
2717 pqi_scsi_done(scmd);
2718 }
2720 list_del(&io_request->request_list_entry);
2723 spin_unlock_irqrestore(
2724 &queue_group->submit_lock[path], flags);
2729 #define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
2730 #define PQI_MAX_HEARTBEAT_REQUESTS 5
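/*
 * The heartbeat timer fires every 5 seconds.  If no interrupts have
 * arrived since the previous run, a heartbeat event is queued to the
 * controller; after PQI_MAX_HEARTBEAT_REQUESTS consecutive silent
 * intervals the controller is taken offline.
 */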
2732 static void pqi_heartbeat_timer_handler(unsigned long data)
2735 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2737 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2739 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2740 ctrl_info->num_heartbeats_requested++;
2741 if (ctrl_info->num_heartbeats_requested >
2742 PQI_MAX_HEARTBEAT_REQUESTS) {
2743 pqi_take_ctrl_offline(ctrl_info);
2744 return;
2745 }
2746 ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2747 schedule_work(&ctrl_info->event_work);
2748 } else {
2749 ctrl_info->num_heartbeats_requested = 0;
2750 }
2752 ctrl_info->previous_num_interrupts = num_interrupts;
2753 mod_timer(&ctrl_info->heartbeat_timer,
2754 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2757 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2759 ctrl_info->previous_num_interrupts =
2760 atomic_read(&ctrl_info->num_interrupts);
2762 init_timer(&ctrl_info->heartbeat_timer);
2763 ctrl_info->heartbeat_timer.expires =
2764 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2765 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2766 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2767 add_timer(&ctrl_info->heartbeat_timer);
2768 ctrl_info->heartbeat_timer_started = true;
2771 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2773 if (ctrl_info->heartbeat_timer_started)
2774 del_timer_sync(&ctrl_info->heartbeat_timer);
2777 static int pqi_event_type_to_event_index(unsigned int event_type)
2781 switch (event_type) {
2782 case PQI_EVENT_TYPE_HEARTBEAT:
2783 index = PQI_EVENT_HEARTBEAT;
2785 case PQI_EVENT_TYPE_HOTPLUG:
2786 index = PQI_EVENT_HOTPLUG;
2788 case PQI_EVENT_TYPE_HARDWARE:
2789 index = PQI_EVENT_HARDWARE;
2791 case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2792 index = PQI_EVENT_PHYSICAL_DEVICE;
2794 case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2795 index = PQI_EVENT_LOGICAL_DEVICE;
2797 case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2798 index = PQI_EVENT_AIO_STATE_CHANGE;
2800 case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2801 index = PQI_EVENT_AIO_CONFIG_CHANGE;
2802 break;
2803 default:
2804 index = -1;
2808 return index;
2811 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2813 unsigned int num_events;
2816 struct pqi_event_queue *event_queue;
2817 struct pqi_event_response *response;
2818 struct pqi_event *pending_event;
2819 bool need_delayed_work;
2822 event_queue = &ctrl_info->event_queue;
2824 need_delayed_work = false;
2825 oq_ci = event_queue->oq_ci_copy;
2828 oq_pi = *event_queue->oq_pi;
2829 if (oq_pi == oq_ci)
2830 break;
2833 response = event_queue->oq_element_array +
2834 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2836 event_index =
2837 pqi_event_type_to_event_index(response->event_type);
2839 if (event_index >= 0) {
2840 if (response->request_acknowlege) {
2841 pending_event =
2842 &ctrl_info->pending_events[event_index];
2843 pending_event->event_type =
2844 response->event_type;
2845 pending_event->event_id = response->event_id;
2846 pending_event->additional_event_id =
2847 response->additional_event_id;
2848 if (event_index != PQI_EVENT_HEARTBEAT) {
2849 pending_event->pending = true;
2850 need_delayed_work = true;
2855 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2859 event_queue->oq_ci_copy = oq_ci;
2860 writel(oq_ci, event_queue->oq_ci);
2862 if (need_delayed_work)
2863 schedule_work(&ctrl_info->event_work);
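/*
 * One interrupt handler instance runs per MSI-X vector, with the queue
 * group passed as the dev_id.  The vector assigned to the first queue
 * group (event_irq) additionally drains the event queue, and any pending
 * requests on the software submit lists are pushed to the hardware before
 * returning.
 */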
2869 static irqreturn_t pqi_irq_handler(int irq, void *data)
2871 struct pqi_ctrl_info *ctrl_info;
2872 struct pqi_queue_group *queue_group;
2873 unsigned int num_responses_handled;
2875 queue_group = data;
2876 ctrl_info = queue_group->ctrl_info;
2878 if (!ctrl_info || !queue_group->oq_ci)
2879 return IRQ_NONE;
2881 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2883 if (irq == ctrl_info->event_irq)
2884 num_responses_handled += pqi_process_event_intr(ctrl_info);
2886 if (num_responses_handled)
2887 atomic_inc(&ctrl_info->num_interrupts);
2889 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2890 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2892 return IRQ_HANDLED;
2895 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2900 ctrl_info->event_irq = ctrl_info->msix_vectors[0];
2902 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2903 rc = request_irq(ctrl_info->msix_vectors[i],
2904 pqi_irq_handler, 0,
2905 DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2907 dev_err(&ctrl_info->pci_dev->dev,
2908 "irq %u init failed with error %d\n",
2909 ctrl_info->msix_vectors[i], rc);
2912 ctrl_info->num_msix_vectors_initialized++;
2918 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2922 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2923 free_irq(ctrl_info->msix_vectors[i],
2924 ctrl_info->intr_data[i]);
2927 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2931 int num_vectors_enabled;
2932 struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2934 max_vectors = ctrl_info->num_queue_groups;
2936 for (i = 0; i < max_vectors; i++)
2937 msix_entries[i].entry = i;
2939 num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2940 msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2942 if (num_vectors_enabled < 0) {
2943 dev_err(&ctrl_info->pci_dev->dev,
2944 "MSI-X init failed with error %d\n",
2945 num_vectors_enabled);
2946 return num_vectors_enabled;
2949 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2950 for (i = 0; i < num_vectors_enabled; i++) {
2951 ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2952 ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2958 static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2964 cpu = cpumask_first(cpu_online_mask);
2965 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2966 rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2967 get_cpu_mask(cpu));
2968 if (rc)
2969 dev_err(&ctrl_info->pci_dev->dev,
2970 "error %d setting affinity hint for irq vector %u\n",
2971 rc, ctrl_info->msix_vectors[i]);
2972 cpu = cpumask_next(cpu, cpu_online_mask);
2976 static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2980 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2981 irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
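/*
 * All operational queue memory comes from a single coherent DMA
 * allocation.  A first pass over a NULL "aligned_pointer" computes the
 * total length needed for the element arrays and queue indexes, including
 * alignment padding; a second pass carves the real buffer into per-group
 * IQ/OQ element arrays, the event queue, and the CI/PI index words.
 */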
2984 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2987 size_t alloc_length;
2988 size_t element_array_length_per_iq;
2989 size_t element_array_length_per_oq;
2990 void *element_array;
2991 void *next_queue_index;
2992 void *aligned_pointer;
2993 unsigned int num_inbound_queues;
2994 unsigned int num_outbound_queues;
2995 unsigned int num_queue_indexes;
2996 struct pqi_queue_group *queue_group;
2998 element_array_length_per_iq =
2999 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3000 ctrl_info->num_elements_per_iq;
3001 element_array_length_per_oq =
3002 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3003 ctrl_info->num_elements_per_oq;
3004 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3005 num_outbound_queues = ctrl_info->num_queue_groups;
3006 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3008 aligned_pointer = NULL;
3010 for (i = 0; i < num_inbound_queues; i++) {
3011 aligned_pointer = PTR_ALIGN(aligned_pointer,
3012 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3013 aligned_pointer += element_array_length_per_iq;
3016 for (i = 0; i < num_outbound_queues; i++) {
3017 aligned_pointer = PTR_ALIGN(aligned_pointer,
3018 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3019 aligned_pointer += element_array_length_per_oq;
3022 aligned_pointer = PTR_ALIGN(aligned_pointer,
3023 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3024 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3025 PQI_EVENT_OQ_ELEMENT_LENGTH;
3027 for (i = 0; i < num_queue_indexes; i++) {
3028 aligned_pointer = PTR_ALIGN(aligned_pointer,
3029 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3030 aligned_pointer += sizeof(pqi_index_t);
3033 alloc_length = (size_t)aligned_pointer +
3034 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3036 ctrl_info->queue_memory_base =
3037 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3038 alloc_length,
3039 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3041 if (!ctrl_info->queue_memory_base) {
3042 dev_err(&ctrl_info->pci_dev->dev,
3043 "failed to allocate memory for PQI admin queues\n");
3047 ctrl_info->queue_memory_length = alloc_length;
3049 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3050 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3052 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3053 queue_group = &ctrl_info->queue_groups[i];
3054 queue_group->iq_element_array[RAID_PATH] = element_array;
3055 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3056 ctrl_info->queue_memory_base_dma_handle +
3057 (element_array - ctrl_info->queue_memory_base);
3058 element_array += element_array_length_per_iq;
3059 element_array = PTR_ALIGN(element_array,
3060 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3061 queue_group->iq_element_array[AIO_PATH] = element_array;
3062 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3063 ctrl_info->queue_memory_base_dma_handle +
3064 (element_array - ctrl_info->queue_memory_base);
3065 element_array += element_array_length_per_iq;
3066 element_array = PTR_ALIGN(element_array,
3067 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3070 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3071 queue_group = &ctrl_info->queue_groups[i];
3072 queue_group->oq_element_array = element_array;
3073 queue_group->oq_element_array_bus_addr =
3074 ctrl_info->queue_memory_base_dma_handle +
3075 (element_array - ctrl_info->queue_memory_base);
3076 element_array += element_array_length_per_oq;
3077 element_array = PTR_ALIGN(element_array,
3078 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3081 ctrl_info->event_queue.oq_element_array = element_array;
3082 ctrl_info->event_queue.oq_element_array_bus_addr =
3083 ctrl_info->queue_memory_base_dma_handle +
3084 (element_array - ctrl_info->queue_memory_base);
3085 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3086 PQI_EVENT_OQ_ELEMENT_LENGTH;
3088 next_queue_index = PTR_ALIGN(element_array,
3089 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3091 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3092 queue_group = &ctrl_info->queue_groups[i];
3093 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3094 queue_group->iq_ci_bus_addr[RAID_PATH] =
3095 ctrl_info->queue_memory_base_dma_handle +
3096 (next_queue_index - ctrl_info->queue_memory_base);
3097 next_queue_index += sizeof(pqi_index_t);
3098 next_queue_index = PTR_ALIGN(next_queue_index,
3099 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3100 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3101 queue_group->iq_ci_bus_addr[AIO_PATH] =
3102 ctrl_info->queue_memory_base_dma_handle +
3103 (next_queue_index - ctrl_info->queue_memory_base);
3104 next_queue_index += sizeof(pqi_index_t);
3105 next_queue_index = PTR_ALIGN(next_queue_index,
3106 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3107 queue_group->oq_pi = next_queue_index;
3108 queue_group->oq_pi_bus_addr =
3109 ctrl_info->queue_memory_base_dma_handle +
3110 (next_queue_index - ctrl_info->queue_memory_base);
3111 next_queue_index += sizeof(pqi_index_t);
3112 next_queue_index = PTR_ALIGN(next_queue_index,
3113 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3116 ctrl_info->event_queue.oq_pi = next_queue_index;
3117 ctrl_info->event_queue.oq_pi_bus_addr =
3118 ctrl_info->queue_memory_base_dma_handle +
3119 (next_queue_index - ctrl_info->queue_memory_base);
3124 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3127 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3128 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3131 * Initialize the backpointers to the controller structure in
3132 * each operational queue group structure.
3134 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3135 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3138 * Assign IDs to all operational queues. Note that the IDs
3139 * assigned to operational IQs are independent of the IDs
3140 * assigned to operational OQs.
3142 ctrl_info->event_queue.oq_id = next_oq_id++;
3143 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3144 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3145 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3146 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3150 * Assign MSI-X table entry indexes to all queues. Note that the
3151 * interrupt for the event queue is shared with the first queue group.
3153 ctrl_info->event_queue.int_msg_num = 0;
3154 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3155 ctrl_info->queue_groups[i].int_msg_num = i;
3157 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3158 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3159 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3160 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3161 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3165 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3167 size_t alloc_length;
3168 struct pqi_admin_queues_aligned *admin_queues_aligned;
3169 struct pqi_admin_queues *admin_queues;
3171 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3172 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3174 ctrl_info->admin_queue_memory_base =
3175 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3176 alloc_length,
3177 &ctrl_info->admin_queue_memory_base_dma_handle,
3178 GFP_KERNEL);
3180 if (!ctrl_info->admin_queue_memory_base)
3183 ctrl_info->admin_queue_memory_length = alloc_length;
3185 admin_queues = &ctrl_info->admin_queues;
3186 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3187 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3188 admin_queues->iq_element_array =
3189 &admin_queues_aligned->iq_element_array;
3190 admin_queues->oq_element_array =
3191 &admin_queues_aligned->oq_element_array;
3192 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3193 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3195 admin_queues->iq_element_array_bus_addr =
3196 ctrl_info->admin_queue_memory_base_dma_handle +
3197 (admin_queues->iq_element_array -
3198 ctrl_info->admin_queue_memory_base);
3199 admin_queues->oq_element_array_bus_addr =
3200 ctrl_info->admin_queue_memory_base_dma_handle +
3201 (admin_queues->oq_element_array -
3202 ctrl_info->admin_queue_memory_base);
3203 admin_queues->iq_ci_bus_addr =
3204 ctrl_info->admin_queue_memory_base_dma_handle +
3205 ((void *)admin_queues->iq_ci -
3206 ctrl_info->admin_queue_memory_base);
3207 admin_queues->oq_pi_bus_addr =
3208 ctrl_info->admin_queue_memory_base_dma_handle +
3209 ((void *)admin_queues->oq_pi -
3210 ctrl_info->admin_queue_memory_base);
3215 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3216 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
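/*
 * Creating the admin queue pair is done through PQI registers rather than
 * IUs: the element array and index addresses are written, the
 * CREATE_ADMIN_QUEUE_PAIR function code is issued, and the status register
 * is polled for IDLE.  Only after that do the admin IQ PI / OQ CI doorbell
 * offsets become valid.
 */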
3218 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3220 struct pqi_device_registers __iomem *pqi_registers;
3221 struct pqi_admin_queues *admin_queues;
3222 unsigned long timeout;
3226 pqi_registers = ctrl_info->pqi_registers;
3227 admin_queues = &ctrl_info->admin_queues;
3229 writeq((u64)admin_queues->iq_element_array_bus_addr,
3230 &pqi_registers->admin_iq_element_array_addr);
3231 writeq((u64)admin_queues->oq_element_array_bus_addr,
3232 &pqi_registers->admin_oq_element_array_addr);
3233 writeq((u64)admin_queues->iq_ci_bus_addr,
3234 &pqi_registers->admin_iq_ci_addr);
3235 writeq((u64)admin_queues->oq_pi_bus_addr,
3236 &pqi_registers->admin_oq_pi_addr);
3238 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3239 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3240 (admin_queues->int_msg_num << 16);
3241 writel(reg, &pqi_registers->admin_iq_num_elements);
3242 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3243 &pqi_registers->function_and_status_code);
3245 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3247 status = readb(&pqi_registers->function_and_status_code);
3248 if (status == PQI_STATUS_IDLE)
3249 break;
3250 if (time_after(jiffies, timeout))
3251 return -ETIMEDOUT;
3252 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3256 * The offset registers are not initialized to the correct
3257 * offsets until *after* the create admin queue pair command
3258 * completes successfully.
3260 admin_queues->iq_pi = ctrl_info->iomem_base +
3261 PQI_DEVICE_REGISTERS_OFFSET +
3262 readq(&pqi_registers->admin_iq_pi_offset);
3263 admin_queues->oq_ci = ctrl_info->iomem_base +
3264 PQI_DEVICE_REGISTERS_OFFSET +
3265 readq(&pqi_registers->admin_oq_ci_offset);
3270 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3271 struct pqi_general_admin_request *request)
3273 struct pqi_admin_queues *admin_queues;
3277 admin_queues = &ctrl_info->admin_queues;
3278 iq_pi = admin_queues->iq_pi_copy;
3280 next_element = admin_queues->iq_element_array +
3281 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3283 memcpy(next_element, request, sizeof(*request));
3285 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3286 admin_queues->iq_pi_copy = iq_pi;
3289 * This write notifies the controller that an IU is available to be
3292 writel(iq_pi, admin_queues->iq_pi);
3295 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3296 struct pqi_general_admin_response *response)
3298 struct pqi_admin_queues *admin_queues;
3301 unsigned long timeout;
3303 admin_queues = &ctrl_info->admin_queues;
3304 oq_ci = admin_queues->oq_ci_copy;
3306 timeout = (3 * HZ) + jiffies;
3309 oq_pi = *admin_queues->oq_pi;
3310 if (oq_pi != oq_ci)
3311 break;
3312 if (time_after(jiffies, timeout)) {
3313 dev_err(&ctrl_info->pci_dev->dev,
3314 "timed out waiting for admin response\n");
3317 usleep_range(1000, 2000);
3320 memcpy(response, admin_queues->oq_element_array +
3321 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3323 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3324 admin_queues->oq_ci_copy = oq_ci;
3325 writel(oq_ci, admin_queues->oq_ci);
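/*
 * pqi_start_io() drains a queue group's software request list into the
 * inbound queue.  An IU may span multiple queue elements; when it would
 * run past the end of the ring, the copy is split into two memcpy()s that
 * wrap around to the start of the element array.
 */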
3330 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3331 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3332 struct pqi_io_request *io_request)
3334 struct pqi_io_request *next;
3339 unsigned long flags;
3340 unsigned int num_elements_needed;
3341 unsigned int num_elements_to_end_of_queue;
3343 struct pqi_iu_header *request;
3345 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3347 if (io_request)
3348 list_add_tail(&io_request->request_list_entry,
3349 &queue_group->request_list[path]);
3351 iq_pi = queue_group->iq_pi_copy[path];
3353 list_for_each_entry_safe(io_request, next,
3354 &queue_group->request_list[path], request_list_entry) {
3356 request = io_request->iu;
3358 iu_length = get_unaligned_le16(&request->iu_length) +
3359 PQI_REQUEST_HEADER_LENGTH;
3360 num_elements_needed =
3361 DIV_ROUND_UP(iu_length,
3362 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3364 iq_ci = *queue_group->iq_ci[path];
3366 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3367 ctrl_info->num_elements_per_iq))
3368 break;
3370 put_unaligned_le16(queue_group->oq_id,
3371 &request->response_queue_id);
3373 next_element = queue_group->iq_element_array[path] +
3374 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3376 num_elements_to_end_of_queue =
3377 ctrl_info->num_elements_per_iq - iq_pi;
3379 if (num_elements_needed <= num_elements_to_end_of_queue) {
3380 memcpy(next_element, request, iu_length);
3381 } else {
3382 copy_count = num_elements_to_end_of_queue *
3383 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3384 memcpy(next_element, request, copy_count);
3385 memcpy(queue_group->iq_element_array[path],
3386 (u8 *)request + copy_count,
3387 iu_length - copy_count);
3390 iq_pi = (iq_pi + num_elements_needed) %
3391 ctrl_info->num_elements_per_iq;
3393 list_del(&io_request->request_list_entry);
3396 if (iq_pi != queue_group->iq_pi_copy[path]) {
3397 queue_group->iq_pi_copy[path] = iq_pi;
3399 * This write notifies the controller that one or more IUs are
3400 * available to be processed.
3402 writel(iq_pi, queue_group->iq_pi[path]);
3405 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
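/*
 * Synchronous RAID-path requests reuse the normal submission machinery:
 * the caller's IU is copied into a pool io_request, completion is
 * signalled through an on-stack struct completion, and a counting
 * semaphore (sync_request_sem) limits how many synchronous requests can
 * be outstanding at once.
 */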
3408 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3409 void *context)
3411 struct completion *waiting = context;
3413 complete(waiting);
3416 static int pqi_submit_raid_request_synchronous_with_io_request(
3417 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3418 unsigned long timeout_msecs)
3421 DECLARE_COMPLETION_ONSTACK(wait);
3423 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3424 io_request->context = &wait;
3426 pqi_start_io(ctrl_info,
3427 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3430 if (timeout_msecs == NO_TIMEOUT) {
3431 wait_for_completion_io(&wait);
3432 } else {
3433 if (!wait_for_completion_io_timeout(&wait,
3434 msecs_to_jiffies(timeout_msecs))) {
3435 dev_warn(&ctrl_info->pci_dev->dev,
3436 "command timed out\n");
3444 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3445 struct pqi_iu_header *request, unsigned int flags,
3446 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3449 struct pqi_io_request *io_request;
3450 unsigned long start_jiffies;
3451 unsigned long msecs_blocked;
3455 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3456 * are mutually exclusive.
3459 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3460 if (down_interruptible(&ctrl_info->sync_request_sem))
3461 return -ERESTARTSYS;
3462 } else {
3463 if (timeout_msecs == NO_TIMEOUT) {
3464 down(&ctrl_info->sync_request_sem);
3465 } else {
3466 start_jiffies = jiffies;
3467 if (down_timeout(&ctrl_info->sync_request_sem,
3468 msecs_to_jiffies(timeout_msecs)))
3469 return -ETIMEDOUT;
3470 msecs_blocked =
3471 jiffies_to_msecs(jiffies - start_jiffies);
3472 if (msecs_blocked >= timeout_msecs)
3473 return -ETIMEDOUT;
3474 timeout_msecs -= msecs_blocked;
3478 io_request = pqi_alloc_io_request(ctrl_info);
3480 put_unaligned_le16(io_request->index,
3481 &(((struct pqi_raid_path_request *)request)->request_id));
3483 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3484 ((struct pqi_raid_path_request *)request)->error_index =
3485 ((struct pqi_raid_path_request *)request)->request_id;
3487 iu_length = get_unaligned_le16(&request->iu_length) +
3488 PQI_REQUEST_HEADER_LENGTH;
3489 memcpy(io_request->iu, request, iu_length);
3491 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3492 io_request, timeout_msecs);
3494 if (error_info) {
3495 if (io_request->error_info)
3496 memcpy(error_info, io_request->error_info,
3497 sizeof(*error_info));
3498 else
3499 memset(error_info, 0, sizeof(*error_info));
3500 } else if (rc == 0 && io_request->error_info) {
3502 struct pqi_raid_error_info *raid_error_info;
3504 raid_error_info = io_request->error_info;
3505 scsi_status = raid_error_info->status;
3507 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3508 raid_error_info->data_out_result ==
3509 PQI_DATA_IN_OUT_UNDERFLOW)
3510 scsi_status = SAM_STAT_GOOD;
3512 if (scsi_status != SAM_STAT_GOOD)
3513 rc = -EIO;
3516 pqi_free_io_request(io_request);
3518 up(&ctrl_info->sync_request_sem);
3520 return rc;
3523 static int pqi_validate_admin_response(
3524 struct pqi_general_admin_response *response, u8 expected_function_code)
3526 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3529 if (get_unaligned_le16(&response->header.iu_length) !=
3530 PQI_GENERAL_ADMIN_IU_LENGTH)
3533 if (response->function_code != expected_function_code)
3536 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3542 static int pqi_submit_admin_request_synchronous(
3543 struct pqi_ctrl_info *ctrl_info,
3544 struct pqi_general_admin_request *request,
3545 struct pqi_general_admin_response *response)
3549 pqi_submit_admin_request(ctrl_info, request);
3551 rc = pqi_poll_for_admin_response(ctrl_info, response);
3553 if (rc == 0)
3554 rc = pqi_validate_admin_response(response,
3555 request->function_code);
3560 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3563 struct pqi_general_admin_request request;
3564 struct pqi_general_admin_response response;
3565 struct pqi_device_capability *capability;
3566 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3568 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3572 memset(&request, 0, sizeof(request));
3574 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3575 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3576 &request.header.iu_length);
3577 request.function_code =
3578 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3579 put_unaligned_le32(sizeof(*capability),
3580 &request.data.report_device_capability.buffer_length);
3582 rc = pqi_map_single(ctrl_info->pci_dev,
3583 &request.data.report_device_capability.sg_descriptor,
3584 capability, sizeof(*capability),
3585 PCI_DMA_FROMDEVICE);
3589 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3590 &response);
3592 pqi_pci_unmap(ctrl_info->pci_dev,
3593 &request.data.report_device_capability.sg_descriptor, 1,
3594 PCI_DMA_FROMDEVICE);
3599 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3604 ctrl_info->max_inbound_queues =
3605 get_unaligned_le16(&capability->max_inbound_queues);
3606 ctrl_info->max_elements_per_iq =
3607 get_unaligned_le16(&capability->max_elements_per_iq);
3608 ctrl_info->max_iq_element_length =
3609 get_unaligned_le16(&capability->max_iq_element_length)
3610 * 16;
3611 ctrl_info->max_outbound_queues =
3612 get_unaligned_le16(&capability->max_outbound_queues);
3613 ctrl_info->max_elements_per_oq =
3614 get_unaligned_le16(&capability->max_elements_per_oq);
3615 ctrl_info->max_oq_element_length =
3616 get_unaligned_le16(&capability->max_oq_element_length)
3617 * 16;
3619 sop_iu_layer_descriptor =
3620 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3622 ctrl_info->max_inbound_iu_length_per_firmware =
3623 get_unaligned_le16(
3624 &sop_iu_layer_descriptor->max_inbound_iu_length);
3625 ctrl_info->inbound_spanning_supported =
3626 sop_iu_layer_descriptor->inbound_spanning_supported;
3627 ctrl_info->outbound_spanning_supported =
3628 sop_iu_layer_descriptor->outbound_spanning_supported;
3636 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3638 if (ctrl_info->max_iq_element_length <
3639 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3640 dev_err(&ctrl_info->pci_dev->dev,
3641 "max. inbound queue element length of %d is less than the required length of %d\n",
3642 ctrl_info->max_iq_element_length,
3643 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3647 if (ctrl_info->max_oq_element_length <
3648 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3649 dev_err(&ctrl_info->pci_dev->dev,
3650 "max. outbound queue element length of %d is less than the required length of %d\n",
3651 ctrl_info->max_oq_element_length,
3652 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3656 if (ctrl_info->max_inbound_iu_length_per_firmware <
3657 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3658 dev_err(&ctrl_info->pci_dev->dev,
3659 "max. inbound IU length of %u is less than the min. required length of %d\n",
3660 ctrl_info->max_inbound_iu_length_per_firmware,
3661 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3665 if (!ctrl_info->inbound_spanning_supported) {
3666 dev_err(&ctrl_info->pci_dev->dev,
3667 "the controller does not support inbound spanning\n");
3671 if (ctrl_info->outbound_spanning_supported) {
3672 dev_err(&ctrl_info->pci_dev->dev,
3673 "the controller supports outbound spanning but this driver does not\n");
3680 static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3681 bool inbound_queue, u16 queue_id)
3683 struct pqi_general_admin_request request;
3684 struct pqi_general_admin_response response;
3686 memset(&request, 0, sizeof(request));
3687 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3688 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3689 &request.header.iu_length);
3690 if (inbound_queue)
3691 request.function_code =
3692 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3693 else
3694 request.function_code =
3695 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3696 put_unaligned_le16(queue_id,
3697 &request.data.delete_operational_queue.queue_id);
3699 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3700 &response);
3703 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3706 struct pqi_event_queue *event_queue;
3707 struct pqi_general_admin_request request;
3708 struct pqi_general_admin_response response;
3710 event_queue = &ctrl_info->event_queue;
3713 * Create OQ (Outbound Queue - device to host queue) to dedicate
3716 memset(&request, 0, sizeof(request));
3717 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3718 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3719 &request.header.iu_length);
3720 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3721 put_unaligned_le16(event_queue->oq_id,
3722 &request.data.create_operational_oq.queue_id);
3723 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3724 &request.data.create_operational_oq.element_array_addr);
3725 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3726 &request.data.create_operational_oq.pi_addr);
3727 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3728 &request.data.create_operational_oq.num_elements);
3729 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3730 &request.data.create_operational_oq.element_length);
3731 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3732 put_unaligned_le16(event_queue->int_msg_num,
3733 &request.data.create_operational_oq.int_msg_num);
3735 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3736 &response);
3740 event_queue->oq_ci = ctrl_info->iomem_base +
3741 PQI_DEVICE_REGISTERS_OFFSET +
3742 get_unaligned_le64(
3743 &response.data.create_operational_oq.oq_ci_offset);
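/*
 * Each queue group requires four admin commands: create the RAID-path
 * inbound queue, create the AIO-path inbound queue, change the AIO queue's
 * property so the firmware treats it as an AIO queue, and finally create
 * the shared outbound queue.  Failures unwind the queues created so far.
 */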
3748 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3752 struct pqi_queue_group *queue_group;
3753 struct pqi_general_admin_request request;
3754 struct pqi_general_admin_response response;
3756 i = ctrl_info->num_active_queue_groups;
3757 queue_group = &ctrl_info->queue_groups[i];
3760 * Create IQ (Inbound Queue - host to device queue) for
3763 memset(&request, 0, sizeof(request));
3764 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3765 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3766 &request.header.iu_length);
3767 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3768 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3769 &request.data.create_operational_iq.queue_id);
3770 put_unaligned_le64(
3771 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3772 &request.data.create_operational_iq.element_array_addr);
3773 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3774 &request.data.create_operational_iq.ci_addr);
3775 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3776 &request.data.create_operational_iq.num_elements);
3777 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3778 &request.data.create_operational_iq.element_length);
3779 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3781 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3782 &response);
3783 if (rc) {
3784 dev_err(&ctrl_info->pci_dev->dev,
3785 "error creating inbound RAID queue\n");
3789 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3790 PQI_DEVICE_REGISTERS_OFFSET +
3791 get_unaligned_le64(
3792 &response.data.create_operational_iq.iq_pi_offset);
3795 * Create IQ (Inbound Queue - host to device queue) for
3796 * Advanced I/O (AIO) path.
3798 memset(&request, 0, sizeof(request));
3799 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3800 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3801 &request.header.iu_length);
3802 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3803 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3804 &request.data.create_operational_iq.queue_id);
3805 put_unaligned_le64((u64)queue_group->
3806 iq_element_array_bus_addr[AIO_PATH],
3807 &request.data.create_operational_iq.element_array_addr);
3808 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3809 &request.data.create_operational_iq.ci_addr);
3810 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3811 &request.data.create_operational_iq.num_elements);
3812 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3813 &request.data.create_operational_iq.element_length);
3814 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3816 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3817 &response);
3818 if (rc) {
3819 dev_err(&ctrl_info->pci_dev->dev,
3820 "error creating inbound AIO queue\n");
3821 goto delete_inbound_queue_raid;
3824 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3825 PQI_DEVICE_REGISTERS_OFFSET +
3826 get_unaligned_le64(
3827 &response.data.create_operational_iq.iq_pi_offset);
3830 * Designate the 2nd IQ as the AIO path. By default, all IQs are
3831 * assumed to be for RAID path I/O unless we change the queue's
3834 memset(&request, 0, sizeof(request));
3835 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3836 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3837 &request.header.iu_length);
3838 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3839 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3840 &request.data.change_operational_iq_properties.queue_id);
3841 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3842 &request.data.change_operational_iq_properties.vendor_specific);
3844 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3845 &response);
3846 if (rc) {
3847 dev_err(&ctrl_info->pci_dev->dev,
3848 "error changing queue property\n");
3849 goto delete_inbound_queue_aio;
3853 * Create OQ (Outbound Queue - device to host queue).
3855 memset(&request, 0, sizeof(request));
3856 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3857 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3858 &request.header.iu_length);
3859 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3860 put_unaligned_le16(queue_group->oq_id,
3861 &request.data.create_operational_oq.queue_id);
3862 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3863 &request.data.create_operational_oq.element_array_addr);
3864 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3865 &request.data.create_operational_oq.pi_addr);
3866 put_unaligned_le16(ctrl_info->num_elements_per_oq,
3867 &request.data.create_operational_oq.num_elements);
3868 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3869 &request.data.create_operational_oq.element_length);
3870 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3871 put_unaligned_le16(queue_group->int_msg_num,
3872 &request.data.create_operational_oq.int_msg_num);
3874 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3875 &response);
3876 if (rc) {
3877 dev_err(&ctrl_info->pci_dev->dev,
3878 "error creating outbound queue\n");
3879 goto delete_inbound_queue_aio;
3882 queue_group->oq_ci = ctrl_info->iomem_base +
3883 PQI_DEVICE_REGISTERS_OFFSET +
3884 get_unaligned_le64(
3885 &response.data.create_operational_oq.oq_ci_offset);
3887 ctrl_info->num_active_queue_groups++;
3891 delete_inbound_queue_aio:
3892 pqi_delete_operational_queue(ctrl_info, true,
3893 queue_group->iq_id[AIO_PATH]);
3895 delete_inbound_queue_raid:
3896 pqi_delete_operational_queue(ctrl_info, true,
3897 queue_group->iq_id[RAID_PATH]);
3902 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3907 rc = pqi_create_event_queue(ctrl_info);
3908 if (rc) {
3909 dev_err(&ctrl_info->pci_dev->dev,
3910 "error creating event queue\n");
3914 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3915 rc = pqi_create_queue_group(ctrl_info);
3916 if (rc) {
3917 dev_err(&ctrl_info->pci_dev->dev,
3918 "error creating queue group number %u/%u\n",
3919 i, ctrl_info->num_queue_groups);
3927 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
3928 (offsetof(struct pqi_event_config, descriptors) + \
3929 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
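/*
 * Event configuration is a read-modify-write: the current event
 * configuration is fetched from the controller, every descriptor is
 * pointed at the driver's event queue OQ ID, and the result is written
 * back with a SET_VENDOR_EVENT_CONFIG request.
 */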
3931 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3935 struct pqi_event_config *event_config;
3936 struct pqi_general_management_request request;
3938 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3939 GFP_KERNEL);
3943 memset(&request, 0, sizeof(request));
3945 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3946 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3947 data.report_event_configuration.sg_descriptors[1]) -
3948 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3949 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3950 &request.data.report_event_configuration.buffer_length);
3952 rc = pqi_map_single(ctrl_info->pci_dev,
3953 request.data.report_event_configuration.sg_descriptors,
3954 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3955 PCI_DMA_FROMDEVICE);
3959 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3960 0, NULL, NO_TIMEOUT);
3962 pqi_pci_unmap(ctrl_info->pci_dev,
3963 request.data.report_event_configuration.sg_descriptors, 1,
3964 PCI_DMA_FROMDEVICE);
3969 for (i = 0; i < event_config->num_event_descriptors; i++)
3970 put_unaligned_le16(ctrl_info->event_queue.oq_id,
3971 &event_config->descriptors[i].oq_id);
3973 memset(&request, 0, sizeof(request));
3975 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3976 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3977 data.report_event_configuration.sg_descriptors[1]) -
3978 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3979 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3980 &request.data.report_event_configuration.buffer_length);
3982 rc = pqi_map_single(ctrl_info->pci_dev,
3983 request.data.report_event_configuration.sg_descriptors,
3984 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3985 PCI_DMA_TODEVICE);
3989 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
3990 NULL, NO_TIMEOUT);
3992 pqi_pci_unmap(ctrl_info->pci_dev,
3993 request.data.report_event_configuration.sg_descriptors, 1,
3997 kfree(event_config);
4002 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4006 size_t sg_chain_buffer_length;
4007 struct pqi_io_request *io_request;
4009 if (!ctrl_info->io_request_pool)
4012 dev = &ctrl_info->pci_dev->dev;
4013 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4014 io_request = ctrl_info->io_request_pool;
4016 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4017 kfree(io_request->iu);
4018 if (!io_request->sg_chain_buffer)
4019 break;
4020 dma_free_coherent(dev, sg_chain_buffer_length,
4021 io_request->sg_chain_buffer,
4022 io_request->sg_chain_buffer_dma_handle);
4026 kfree(ctrl_info->io_request_pool);
4027 ctrl_info->io_request_pool = NULL;
4030 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4032 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4033 ctrl_info->error_buffer_length,
4034 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4036 if (!ctrl_info->error_buffer)
4037 return -ENOMEM;
4039 return 0;
4042 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4045 void *sg_chain_buffer;
4046 size_t sg_chain_buffer_length;
4047 dma_addr_t sg_chain_buffer_dma_handle;
4049 struct pqi_io_request *io_request;
4051 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4052 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4054 if (!ctrl_info->io_request_pool) {
4055 dev_err(&ctrl_info->pci_dev->dev,
4056 "failed to allocate I/O request pool\n");
4060 dev = &ctrl_info->pci_dev->dev;
4061 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4062 io_request = ctrl_info->io_request_pool;
4064 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4065 io_request->iu =
4066 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4068 if (!io_request->iu) {
4069 dev_err(&ctrl_info->pci_dev->dev,
4070 "failed to allocate IU buffers\n");
4074 sg_chain_buffer = dma_alloc_coherent(dev,
4075 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4076 GFP_KERNEL);
4078 if (!sg_chain_buffer) {
4079 dev_err(&ctrl_info->pci_dev->dev,
4080 "failed to allocate PQI scatter-gather chain buffers\n");
4084 io_request->index = i;
4085 io_request->sg_chain_buffer = sg_chain_buffer;
4086 io_request->sg_chain_buffer_dma_handle =
4087 sg_chain_buffer_dma_handle;
4094 pqi_free_all_io_requests(ctrl_info);
4100 * Calculate required resources that are sized based on max. outstanding
4101 * requests and max. transfer size.
4102 */
4104 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4106 u32 max_transfer_size;
4107 u32 max_sg_entries;
4109 ctrl_info->scsi_ml_can_queue =
4110 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4111 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4113 ctrl_info->error_buffer_length =
4114 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4116 max_transfer_size =
4117 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4119 max_sg_entries = max_transfer_size / PAGE_SIZE;
4121 /* +1 to cover when the buffer is not page-aligned. */
4122 max_sg_entries++;
4124 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4126 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4128 ctrl_info->sg_chain_buffer_length =
4129 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4130 ctrl_info->sg_tablesize = max_sg_entries;
4131 ctrl_info->max_sectors = max_transfer_size / 512;
4134 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4136 int num_cpus;
4137 int max_queue_groups;
4138 int num_queue_groups;
4139 u16 num_elements_per_iq;
4140 u16 num_elements_per_oq;
4142 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4143 ctrl_info->max_outbound_queues - 1);
4144 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4146 num_cpus = num_online_cpus();
4147 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4148 num_queue_groups = min(num_queue_groups, max_queue_groups);
4150 ctrl_info->num_queue_groups = num_queue_groups;
4153 * Make sure that the max. inbound IU length is an even multiple
4154 * of our inbound element length.
4156 ctrl_info->max_inbound_iu_length =
4157 (ctrl_info->max_inbound_iu_length_per_firmware /
4158 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4159 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4161 num_elements_per_iq =
4162 (ctrl_info->max_inbound_iu_length /
4163 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4165 /* Add one because one element in each queue is unusable. */
4166 num_elements_per_iq++;
4168 num_elements_per_iq = min(num_elements_per_iq,
4169 ctrl_info->max_elements_per_iq);
4171 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4172 num_elements_per_oq = min(num_elements_per_oq,
4173 ctrl_info->max_elements_per_oq);
4175 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4176 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4178 ctrl_info->max_sg_per_iu =
4179 ((ctrl_info->max_inbound_iu_length -
4180 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4181 sizeof(struct pqi_sg_descriptor)) +
4182 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4185 static inline void pqi_set_sg_descriptor(
4186 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4188 u64 address = (u64)sg_dma_address(sg);
4189 unsigned int length = sg_dma_len(sg);
4191 put_unaligned_le64(address, &sg_descriptor->address);
4192 put_unaligned_le32(length, &sg_descriptor->length);
4193 put_unaligned_le32(0, &sg_descriptor->flags);
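/*
 * The two SG-list builders below share the same layout: as many
 * descriptors as fit are embedded in the IU itself; if the scatterlist is
 * longer, the last embedded descriptor is converted into a CISS_SG_CHAIN
 * pointer to the request's preallocated chain buffer and the remaining
 * entries continue there.  The final descriptor is flagged CISS_SG_LAST.
 */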
4196 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4197 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4198 struct pqi_io_request *io_request)
4204 unsigned int num_sg_in_iu;
4205 unsigned int max_sg_per_iu;
4206 struct scatterlist *sg;
4207 struct pqi_sg_descriptor *sg_descriptor;
4209 sg_count = scsi_dma_map(scmd);
4213 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4214 PQI_REQUEST_HEADER_LENGTH;
4219 sg = scsi_sglist(scmd);
4220 sg_descriptor = request->sg_descriptors;
4221 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4227 pqi_set_sg_descriptor(sg_descriptor, sg);
4234 if (i == max_sg_per_iu) {
4235 put_unaligned_le64(
4236 (u64)io_request->sg_chain_buffer_dma_handle,
4237 &sg_descriptor->address);
4238 put_unaligned_le32((sg_count - num_sg_in_iu)
4239 * sizeof(*sg_descriptor),
4240 &sg_descriptor->length);
4241 put_unaligned_le32(CISS_SG_CHAIN,
4242 &sg_descriptor->flags);
4245 sg_descriptor = io_request->sg_chain_buffer;
4250 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4251 request->partial = chained;
4252 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4255 put_unaligned_le16(iu_length, &request->header.iu_length);
4260 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4261 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4262 struct pqi_io_request *io_request)
4268 unsigned int num_sg_in_iu;
4269 unsigned int max_sg_per_iu;
4270 struct scatterlist *sg;
4271 struct pqi_sg_descriptor *sg_descriptor;
4273 sg_count = scsi_dma_map(scmd);
4277 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4278 PQI_REQUEST_HEADER_LENGTH;
4284 sg = scsi_sglist(scmd);
4285 sg_descriptor = request->sg_descriptors;
4286 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4291 pqi_set_sg_descriptor(sg_descriptor, sg);
4298 if (i == max_sg_per_iu) {
4299 put_unaligned_le64(
4300 (u64)io_request->sg_chain_buffer_dma_handle,
4301 &sg_descriptor->address);
4302 put_unaligned_le32((sg_count - num_sg_in_iu)
4303 * sizeof(*sg_descriptor),
4304 &sg_descriptor->length);
4305 put_unaligned_le32(CISS_SG_CHAIN,
4306 &sg_descriptor->flags);
4309 sg_descriptor = io_request->sg_chain_buffer;
4314 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4315 request->partial = chained;
4316 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4319 put_unaligned_le16(iu_length, &request->header.iu_length);
4320 request->num_sg_descriptors = num_sg_in_iu;
4325 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4328 struct scsi_cmnd *scmd;
4330 scmd = io_request->scmd;
4331 pqi_free_io_request(io_request);
4332 scsi_dma_unmap(scmd);
4333 pqi_scsi_done(scmd);
4336 static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4337 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4338 struct pqi_queue_group *queue_group)
4342 struct pqi_io_request *io_request;
4343 struct pqi_raid_path_request *request;
4345 io_request = pqi_alloc_io_request(ctrl_info);
4346 io_request->io_complete_callback = pqi_raid_io_complete;
4347 io_request->scmd = scmd;
4349 scmd->host_scribble = (unsigned char *)io_request;
4351 request = io_request->iu;
4352 memset(request, 0,
4353 offsetof(struct pqi_raid_path_request, sg_descriptors));
4355 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4356 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4357 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4358 put_unaligned_le16(io_request->index, &request->request_id);
4359 request->error_index = request->request_id;
4360 memcpy(request->lun_number, device->scsi3addr,
4361 sizeof(request->lun_number));
4363 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4364 memcpy(request->cdb, scmd->cmnd, cdb_length);
4366 switch (cdb_length) {
4367 case 6:
4368 case 10:
4369 case 12:
4370 case 16:
4371 /* No bytes in the Additional CDB bytes field */
4372 request->additional_cdb_bytes_usage =
4373 SOP_ADDITIONAL_CDB_BYTES_0;
4375 case 20:
4376 /* 4 bytes in the Additional cdb field */
4377 request->additional_cdb_bytes_usage =
4378 SOP_ADDITIONAL_CDB_BYTES_4;
4380 case 24:
4381 /* 8 bytes in the Additional cdb field */
4382 request->additional_cdb_bytes_usage =
4383 SOP_ADDITIONAL_CDB_BYTES_8;
4386 /* 12 bytes in the Additional cdb field */
4387 request->additional_cdb_bytes_usage =
4388 SOP_ADDITIONAL_CDB_BYTES_12;
4392 /* 16 bytes in the Additional cdb field */
4393 request->additional_cdb_bytes_usage =
4394 SOP_ADDITIONAL_CDB_BYTES_16;
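/*
 * The SOP data-direction flags below are presumably expressed from the
 * controller's point of view: a host write (DMA_TO_DEVICE) becomes a
 * controller read from host memory (SOP_READ_FLAG), and a host read
 * (DMA_FROM_DEVICE) becomes a controller write (SOP_WRITE_FLAG).  The same
 * mapping is repeated for the AIO path in pqi_aio_submit_io().
 */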
4398 switch (scmd->sc_data_direction) {
4399 case DMA_TO_DEVICE:
4400 request->data_direction = SOP_READ_FLAG;
4401 break;
4402 case DMA_FROM_DEVICE:
4403 request->data_direction = SOP_WRITE_FLAG;
4404 break;
4405 case DMA_NONE:
4406 request->data_direction = SOP_NO_DIRECTION_FLAG;
4407 break;
4408 case DMA_BIDIRECTIONAL:
4409 request->data_direction = SOP_BIDIRECTIONAL;
4410 break;
4411 default:
4412 dev_err(&ctrl_info->pci_dev->dev,
4413 "unknown data direction: %d\n",
4414 scmd->sc_data_direction);
4415 WARN_ON(scmd->sc_data_direction);
4416 break;
4417 }
4419 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4421 pqi_free_io_request(io_request);
4422 return SCSI_MLQUEUE_HOST_BUSY;
4425 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4430 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4433 struct scsi_cmnd *scmd;
4435 scmd = io_request->scmd;
4436 scsi_dma_unmap(scmd);
4437 if (io_request->status == -EAGAIN)
4438 set_host_byte(scmd, DID_IMM_RETRY);
4439 pqi_free_io_request(io_request);
4440 pqi_scsi_done(scmd);
4443 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4444 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4445 struct pqi_queue_group *queue_group)
4447 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4448 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
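/*
 * pqi_aio_submit_io() below drives the "accelerated I/O" path: the request
 * is addressed by the controller-assigned aio_handle (stored in the IU's
 * nexus_id field) rather than by a SCSI LUN address, which appears to let
 * the firmware hand the command straight to the underlying physical device.
 */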
4451 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4452 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4453 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4454 struct pqi_encryption_info *encryption_info)
4457 struct pqi_io_request *io_request;
4458 struct pqi_aio_path_request *request;
4460 io_request = pqi_alloc_io_request(ctrl_info);
4461 io_request->io_complete_callback = pqi_aio_io_complete;
4462 io_request->scmd = scmd;
4464 scmd->host_scribble = (unsigned char *)io_request;
4466 request = io_request->iu;
4468 offsetof(struct pqi_raid_path_request, sg_descriptors));
4470 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4471 put_unaligned_le32(aio_handle, &request->nexus_id);
4472 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4473 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4474 put_unaligned_le16(io_request->index, &request->request_id);
4475 request->error_index = request->request_id;
4476 if (cdb_length > sizeof(request->cdb))
4477 cdb_length = sizeof(request->cdb);
4478 request->cdb_length = cdb_length;
4479 memcpy(request->cdb, cdb, cdb_length);
4481 switch (scmd->sc_data_direction) {
4482 case DMA_TO_DEVICE:
4483 request->data_direction = SOP_READ_FLAG;
4484 break;
4485 case DMA_FROM_DEVICE:
4486 request->data_direction = SOP_WRITE_FLAG;
4487 break;
4488 case DMA_NONE:
4489 request->data_direction = SOP_NO_DIRECTION_FLAG;
4490 break;
4491 case DMA_BIDIRECTIONAL:
4492 request->data_direction = SOP_BIDIRECTIONAL;
4493 break;
4494 default:
4495 dev_err(&ctrl_info->pci_dev->dev,
4496 "unknown data direction: %d\n",
4497 scmd->sc_data_direction);
4498 WARN_ON(scmd->sc_data_direction);
4499 break;
4500 }
4502 if (encryption_info) {
4503 request->encryption_enable = true;
4504 put_unaligned_le16(encryption_info->data_encryption_key_index,
4505 &request->data_encryption_key_index);
4506 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4507 &request->encrypt_tweak_lower);
4508 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4509 &request->encrypt_tweak_upper);
4512 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4514 pqi_free_io_request(io_request);
4515 return SCSI_MLQUEUE_HOST_BUSY;
4518 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4523 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4524 struct scsi_cmnd *scmd)
4527 struct pqi_ctrl_info *ctrl_info;
4528 struct pqi_scsi_dev *device;
4530 struct pqi_queue_group *queue_group;
4533 device = scmd->device->hostdata;
4534 ctrl_info = shost_to_hba(shost);
4536 if (pqi_ctrl_offline(ctrl_info)) {
4537 set_host_byte(scmd, DID_NO_CONNECT);
4538 pqi_scsi_done(scmd);
4543 * This is necessary because the SML doesn't zero out this field during
4544 * error recovery.
4545 */
4546 scmd->result = 0;
4548 hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4549 if (hwq >= ctrl_info->num_queue_groups)
4552 queue_group = &ctrl_info->queue_groups[hwq];
4554 if (pqi_is_logical_device(device)) {
4555 raid_bypassed = false;
4556 if (device->offload_enabled &&
4557 scmd->request->cmd_type == REQ_TYPE_FS) {
4558 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4559 scmd, queue_group);
4560 if (rc == 0 ||
4561 rc == SCSI_MLQUEUE_HOST_BUSY ||
4562 rc == SAM_STAT_CHECK_CONDITION ||
4563 rc == SAM_STAT_RESERVATION_CONFLICT)
4564 raid_bypassed = true;
4565 }
4566 if (!raid_bypassed)
4567 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4568 queue_group);
4569 } else {
4570 if (device->aio_enabled)
4571 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4572 queue_group);
4573 else
4574 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4581 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4584 struct completion *waiting = context;
4589 #define PQI_LUN_RESET_TIMEOUT_SECS 10
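/*
 * The helper below waits for the LUN reset to complete in
 * PQI_LUN_RESET_TIMEOUT_SECS slices; between slices it checks controller
 * health, stops waiting if the controller is found to be offline, and logs
 * how long the reset has been outstanding.
 */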
4591 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4592 struct pqi_scsi_dev *device, struct completion *wait)
4595 unsigned int wait_secs = 0;
4598 if (wait_for_completion_io_timeout(wait,
4599 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4604 pqi_check_ctrl_health(ctrl_info);
4605 if (pqi_ctrl_offline(ctrl_info)) {
4610 wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
4612 dev_err(&ctrl_info->pci_dev->dev,
4613 "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
4614 ctrl_info->scsi_host->host_no, device->bus,
4615 device->target, device->lun, wait_secs);
4621 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4622 struct pqi_scsi_dev *device)
4625 struct pqi_io_request *io_request;
4626 DECLARE_COMPLETION_ONSTACK(wait);
4627 struct pqi_task_management_request *request;
4629 down(&ctrl_info->lun_reset_sem);
4631 io_request = pqi_alloc_io_request(ctrl_info);
4632 io_request->io_complete_callback = pqi_lun_reset_complete;
4633 io_request->context = &wait;
4635 request = io_request->iu;
4636 memset(request, 0, sizeof(*request));
4638 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4639 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4640 &request->header.iu_length);
4641 put_unaligned_le16(io_request->index, &request->request_id);
4642 memcpy(request->lun_number, device->scsi3addr,
4643 sizeof(request->lun_number));
4644 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4646 pqi_start_io(ctrl_info,
4647 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4650 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4652 rc = io_request->status;
4654 pqi_free_io_request(io_request);
4655 up(&ctrl_info->lun_reset_sem);
4660 /* Performs a reset at the LUN level. */
4662 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4663 struct pqi_scsi_dev *device)
4667 pqi_check_ctrl_health(ctrl_info);
4668 if (pqi_ctrl_offline(ctrl_info))
4671 rc = pqi_lun_reset(ctrl_info, device);
4673 return rc == 0 ? SUCCESS : FAILED;
4676 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4679 struct pqi_ctrl_info *ctrl_info;
4680 struct pqi_scsi_dev *device;
4682 ctrl_info = shost_to_hba(scmd->device->host);
4683 device = scmd->device->hostdata;
4685 dev_err(&ctrl_info->pci_dev->dev,
4686 "resetting scsi %d:%d:%d:%d\n",
4687 ctrl_info->scsi_host->host_no,
4688 device->bus, device->target, device->lun);
4690 rc = pqi_device_reset(ctrl_info, device);
4692 dev_err(&ctrl_info->pci_dev->dev,
4693 "reset of scsi %d:%d:%d:%d: %s\n",
4694 ctrl_info->scsi_host->host_no,
4695 device->bus, device->target, device->lun,
4696 rc == SUCCESS ? "SUCCESS" : "FAILED");
4701 static int pqi_slave_alloc(struct scsi_device *sdev)
4703 struct pqi_scsi_dev *device;
4704 unsigned long flags;
4705 struct pqi_ctrl_info *ctrl_info;
4706 struct scsi_target *starget;
4707 struct sas_rphy *rphy;
4709 ctrl_info = shost_to_hba(sdev->host);
4711 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4713 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4714 starget = scsi_target(sdev);
4715 rphy = target_to_rphy(starget);
4716 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4718 device->target = sdev_id(sdev);
4719 device->lun = sdev->lun;
4720 device->target_lun_valid = true;
4723 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4724 sdev_id(sdev), sdev->lun);
4727 if (device && device->expose_device) {
4728 sdev->hostdata = device;
4729 device->sdev = sdev;
4730 if (device->queue_depth) {
4731 device->advertised_queue_depth = device->queue_depth;
4732 scsi_change_queue_depth(sdev,
4733 device->advertised_queue_depth);
4737 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4742 static int pqi_slave_configure(struct scsi_device *sdev)
4744 struct pqi_scsi_dev *device;
4746 device = sdev->hostdata;
4747 if (!device->expose_device)
4748 sdev->no_uld_attach = true;
4753 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4756 struct pci_dev *pci_dev;
4757 u32 subsystem_vendor;
4758 u32 subsystem_device;
4759 cciss_pci_info_struct pciinfo;
4764 pci_dev = ctrl_info->pci_dev;
4766 pciinfo.domain = pci_domain_nr(pci_dev->bus);
4767 pciinfo.bus = pci_dev->bus->number;
4768 pciinfo.dev_fn = pci_dev->devfn;
4769 subsystem_vendor = pci_dev->subsystem_vendor;
4770 subsystem_device = pci_dev->subsystem_device;
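/* board_id: PCI subsystem device ID in the upper 16 bits, subsystem vendor ID in the lower 16. */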
4771 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4772 subsystem_vendor;
4774 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4780 static int pqi_getdrivver_ioctl(void __user *arg)
4787 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4788 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
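/*
 * The 32-bit word above packs the driver version for CCISS_GETDRIVVER:
 * major in bits 31-28, minor in bits 27-24, release in bits 23-16 and
 * revision in bits 15-0, i.e. (a << 28) | (b << 24) | (c << 16) | d for
 * a version of the form a.b.c-d.
 */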
4790 if (copy_to_user(arg, &version, sizeof(version)))
4796 struct ciss_error_info {
4799 size_t sense_data_length;
4802 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4803 struct ciss_error_info *ciss_error_info)
4805 int ciss_cmd_status;
4806 size_t sense_data_length;
4808 switch (pqi_error_info->data_out_result) {
4809 case PQI_DATA_IN_OUT_GOOD:
4810 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4812 case PQI_DATA_IN_OUT_UNDERFLOW:
4813 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4815 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4816 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4818 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4819 case PQI_DATA_IN_OUT_BUFFER_ERROR:
4820 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4821 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4822 case PQI_DATA_IN_OUT_ERROR:
4823 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4825 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4826 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4827 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4828 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4829 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4830 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4831 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4832 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4833 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4834 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4835 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4837 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4838 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4840 case PQI_DATA_IN_OUT_ABORTED:
4841 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4843 case PQI_DATA_IN_OUT_TIMEOUT:
4844 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4847 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4851 sense_data_length =
4852 get_unaligned_le16(&pqi_error_info->sense_data_length);
4853 if (sense_data_length == 0)
4854 sense_data_length =
4855 get_unaligned_le16(&pqi_error_info->response_data_length);
4856 if (sense_data_length)
4857 if (sense_data_length > sizeof(pqi_error_info->data))
4858 sense_data_length = sizeof(pqi_error_info->data);
4860 ciss_error_info->scsi_status = pqi_error_info->status;
4861 ciss_error_info->command_status = ciss_cmd_status;
4862 ciss_error_info->sense_data_length = sense_data_length;
4865 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4868 char *kernel_buffer = NULL;
4870 size_t sense_data_length;
4871 IOCTL_Command_struct iocommand;
4872 struct pqi_raid_path_request request;
4873 struct pqi_raid_error_info pqi_error_info;
4874 struct ciss_error_info ciss_error_info;
4876 if (pqi_ctrl_offline(ctrl_info))
4880 if (!capable(CAP_SYS_RAWIO))
4882 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4884 if (iocommand.buf_size < 1 &&
4885 iocommand.Request.Type.Direction != XFER_NONE)
4887 if (iocommand.Request.CDBLen > sizeof(request.cdb))
4889 if (iocommand.Request.Type.Type != TYPE_CMD)
4892 switch (iocommand.Request.Type.Direction) {
4901 if (iocommand.buf_size > 0) {
4902 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4905 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4906 if (copy_from_user(kernel_buffer, iocommand.buf,
4907 iocommand.buf_size)) {
4912 memset(kernel_buffer, 0, iocommand.buf_size);
4916 memset(&request, 0, sizeof(request));
4918 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4919 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4920 PQI_REQUEST_HEADER_LENGTH;
4921 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4922 sizeof(request.lun_number));
4923 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4924 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4926 switch (iocommand.Request.Type.Direction) {
4928 request.data_direction = SOP_NO_DIRECTION_FLAG;
4931 request.data_direction = SOP_WRITE_FLAG;
4934 request.data_direction = SOP_READ_FLAG;
4938 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4940 if (iocommand.buf_size > 0) {
4941 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4943 rc = pqi_map_single(ctrl_info->pci_dev,
4944 &request.sg_descriptors[0], kernel_buffer,
4945 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4949 iu_length += sizeof(request.sg_descriptors[0]);
4952 put_unaligned_le16(iu_length, &request.header.iu_length);
4954 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4955 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4957 if (iocommand.buf_size > 0)
4958 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4959 PCI_DMA_BIDIRECTIONAL);
4961 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4964 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4965 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4966 iocommand.error_info.CommandStatus =
4967 ciss_error_info.command_status;
4968 sense_data_length = ciss_error_info.sense_data_length;
4969 if (sense_data_length) {
4970 if (sense_data_length >
4971 sizeof(iocommand.error_info.SenseInfo))
4972 sense_data_length =
4973 sizeof(iocommand.error_info.SenseInfo);
4974 memcpy(iocommand.error_info.SenseInfo,
4975 pqi_error_info.data, sense_data_length);
4976 iocommand.error_info.SenseLen = sense_data_length;
4980 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
4985 if (rc == 0 && iocommand.buf_size > 0 &&
4986 (iocommand.Request.Type.Direction & XFER_READ)) {
4987 if (copy_to_user(iocommand.buf, kernel_buffer,
4988 iocommand.buf_size)) {
4994 kfree(kernel_buffer);
4999 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5002 struct pqi_ctrl_info *ctrl_info;
5004 ctrl_info = shost_to_hba(sdev->host);
5007 case CCISS_DEREGDISK:
5008 case CCISS_REGNEWDISK:
5010 rc = pqi_scan_scsi_devices(ctrl_info);
5012 case CCISS_GETPCIINFO:
5013 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5015 case CCISS_GETDRIVVER:
5016 rc = pqi_getdrivver_ioctl(arg);
5018 case CCISS_PASSTHRU:
5019 rc = pqi_passthru_ioctl(ctrl_info, arg);
5029 static ssize_t pqi_version_show(struct device *dev,
5030 struct device_attribute *attr, char *buffer)
5033 struct Scsi_Host *shost;
5034 struct pqi_ctrl_info *ctrl_info;
5036 shost = class_to_shost(dev);
5037 ctrl_info = shost_to_hba(shost);
5039 count += snprintf(buffer + count, PAGE_SIZE - count,
5040 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5042 count += snprintf(buffer + count, PAGE_SIZE - count,
5043 "firmware: %s\n", ctrl_info->firmware_version);
5048 static ssize_t pqi_host_rescan_store(struct device *dev,
5049 struct device_attribute *attr, const char *buffer, size_t count)
5051 struct Scsi_Host *shost = class_to_shost(dev);
5053 pqi_scan_start(shost);
5058 static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5059 static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5061 static struct device_attribute *pqi_shost_attrs[] = {
5067 static ssize_t pqi_sas_address_show(struct device *dev,
5068 struct device_attribute *attr, char *buffer)
5070 struct pqi_ctrl_info *ctrl_info;
5071 struct scsi_device *sdev;
5072 struct pqi_scsi_dev *device;
5073 unsigned long flags;
5076 sdev = to_scsi_device(dev);
5077 ctrl_info = shost_to_hba(sdev->host);
5079 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5081 device = sdev->hostdata;
5082 if (pqi_is_logical_device(device)) {
5083 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5087 sas_address = device->sas_address;
5089 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5091 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5094 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5095 struct device_attribute *attr, char *buffer)
5097 struct pqi_ctrl_info *ctrl_info;
5098 struct scsi_device *sdev;
5099 struct pqi_scsi_dev *device;
5100 unsigned long flags;
5102 sdev = to_scsi_device(dev);
5103 ctrl_info = shost_to_hba(sdev->host);
5105 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5107 device = sdev->hostdata;
5108 buffer[0] = device->offload_enabled ? '1' : '0';
5112 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5117 static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5118 static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5119 pqi_ssd_smart_path_enabled_show, NULL);
5121 static struct device_attribute *pqi_sdev_attrs[] = {
5122 &dev_attr_sas_address,
5123 &dev_attr_ssd_smart_path_enabled,
5127 static struct scsi_host_template pqi_driver_template = {
5128 .module = THIS_MODULE,
5129 .name = DRIVER_NAME_SHORT,
5130 .proc_name = DRIVER_NAME_SHORT,
5131 .queuecommand = pqi_scsi_queue_command,
5132 .scan_start = pqi_scan_start,
5133 .scan_finished = pqi_scan_finished,
5135 .use_clustering = ENABLE_CLUSTERING,
5136 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5138 .slave_alloc = pqi_slave_alloc,
5139 .slave_configure = pqi_slave_configure,
5140 .sdev_attrs = pqi_sdev_attrs,
5141 .shost_attrs = pqi_shost_attrs,
5144 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5147 struct Scsi_Host *shost;
5149 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5151 dev_err(&ctrl_info->pci_dev->dev,
5152 "scsi_host_alloc failed for controller %u\n",
5153 ctrl_info->ctrl_id);
5158 shost->n_io_port = 0;
5159 shost->this_id = -1;
5160 shost->max_channel = PQI_MAX_BUS;
5161 shost->max_cmd_len = MAX_COMMAND_SIZE;
5162 shost->max_lun = ~0;
5164 shost->max_sectors = ctrl_info->max_sectors;
5165 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5166 shost->cmd_per_lun = shost->can_queue;
5167 shost->sg_tablesize = ctrl_info->sg_tablesize;
5168 shost->transportt = pqi_sas_transport_template;
5169 shost->irq = ctrl_info->msix_vectors[0];
5170 shost->unique_id = shost->irq;
5171 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5172 shost->hostdata[0] = (unsigned long)ctrl_info;
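/*
 * The ctrl_info pointer is stashed in the Scsi_Host private area here;
 * shost_to_hba(), used throughout this file, presumably reads it back out
 * of hostdata[0].
 */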
5174 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5176 dev_err(&ctrl_info->pci_dev->dev,
5177 "scsi_add_host failed for controller %u\n",
5178 ctrl_info->ctrl_id);
5182 rc = pqi_add_sas_host(shost, ctrl_info);
5184 dev_err(&ctrl_info->pci_dev->dev,
5185 "add SAS host failed for controller %u\n",
5186 ctrl_info->ctrl_id);
5190 ctrl_info->scsi_host = shost;
5195 scsi_remove_host(shost);
5197 scsi_host_put(shost);
5202 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5204 struct Scsi_Host *shost;
5206 pqi_delete_sas_host(ctrl_info);
5208 shost = ctrl_info->scsi_host;
5212 scsi_remove_host(shost);
5213 scsi_host_put(shost);
5216 #define PQI_RESET_ACTION_RESET 0x1
5218 #define PQI_RESET_TYPE_NO_RESET 0x0
5219 #define PQI_RESET_TYPE_SOFT_RESET 0x1
5220 #define PQI_RESET_TYPE_FIRM_RESET 0x2
5221 #define PQI_RESET_TYPE_HARD_RESET 0x3
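/*
 * The device_reset register takes the reset action in its upper bits and
 * the reset type in its low-order bits; pqi_reset() below requests a hard
 * reset with (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET,
 * i.e. (0x1 << 5) | 0x3 = 0x23.
 */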
5223 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5228 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5229 PQI_RESET_TYPE_HARD_RESET;
5231 writel(reset_params,
5232 &ctrl_info->pqi_registers->device_reset);
5234 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5236 dev_err(&ctrl_info->pci_dev->dev,
5237 "PQI reset failed\n");
5242 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5245 struct bmic_identify_controller *identify;
5247 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5251 rc = pqi_identify_controller(ctrl_info, identify);
5255 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5256 sizeof(identify->firmware_version));
5257 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5258 snprintf(ctrl_info->firmware_version +
5259 strlen(ctrl_info->firmware_version),
5260 sizeof(ctrl_info->firmware_version),
5261 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5269 static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
5271 if (!sis_is_firmware_running(ctrl_info))
5274 if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
5275 sis_disable_msix(ctrl_info);
5276 if (pqi_reset(ctrl_info) == 0)
5277 sis_reenable_sis_mode(ctrl_info);
5283 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5287 if (reset_devices) {
5288 rc = pqi_kdump_init(ctrl_info);
5294 * When the controller comes out of reset, it is always running
5295 * in legacy SIS mode. This is so that it can be compatible
5296 * with legacy drivers shipped with OSes. So we have to talk
5297 * to it using SIS commands at first. Once we are satisfied
5298 * that the controller supports PQI, we transition it into PQI
5299 * mode.
5300 */
5303 * Wait until the controller is ready to start accepting SIS
5304 * commands.
5305 */
5306 rc = sis_wait_for_ctrl_ready(ctrl_info);
5308 dev_err(&ctrl_info->pci_dev->dev,
5309 "error initializing SIS interface\n");
5314 * Get the controller properties. This allows us to determine
5315 * whether or not it supports PQI mode.
5317 rc = sis_get_ctrl_properties(ctrl_info);
5319 dev_err(&ctrl_info->pci_dev->dev,
5320 "error obtaining controller properties\n");
5324 rc = sis_get_pqi_capabilities(ctrl_info);
5326 dev_err(&ctrl_info->pci_dev->dev,
5327 "error obtaining controller capabilities\n");
5331 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5332 ctrl_info->max_outstanding_requests =
5333 PQI_MAX_OUTSTANDING_REQUESTS;
5335 pqi_calculate_io_resources(ctrl_info);
5337 rc = pqi_alloc_error_buffer(ctrl_info);
5339 dev_err(&ctrl_info->pci_dev->dev,
5340 "failed to allocate PQI error buffer\n");
5345 * If the function we are about to call succeeds, the
5346 * controller will transition from legacy SIS mode
5349 rc = sis_init_base_struct_addr(ctrl_info);
5351 dev_err(&ctrl_info->pci_dev->dev,
5352 "error initializing PQI mode\n");
5356 /* Wait for the controller to complete the SIS -> PQI transition. */
5357 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5359 dev_err(&ctrl_info->pci_dev->dev,
5360 "transition to PQI mode failed\n");
5364 /* From here on, we are running in PQI mode. */
5365 ctrl_info->pqi_mode_enabled = true;
5366 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5368 rc = pqi_alloc_admin_queues(ctrl_info);
5370 dev_err(&ctrl_info->pci_dev->dev,
5371 "error allocating admin queues\n");
5375 rc = pqi_create_admin_queues(ctrl_info);
5377 dev_err(&ctrl_info->pci_dev->dev,
5378 "error creating admin queues\n");
5382 rc = pqi_report_device_capability(ctrl_info);
5384 dev_err(&ctrl_info->pci_dev->dev,
5385 "obtaining device capability failed\n");
5389 rc = pqi_validate_device_capability(ctrl_info);
5393 pqi_calculate_queue_resources(ctrl_info);
5395 rc = pqi_enable_msix_interrupts(ctrl_info);
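/*
 * If fewer MSI-X vectors were granted than the number of queue groups we
 * planned for, queue resources are recalculated below so that the
 * queue-group count does not exceed the vectors actually available.
 */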
5399 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5400 ctrl_info->max_msix_vectors =
5401 ctrl_info->num_msix_vectors_enabled;
5402 pqi_calculate_queue_resources(ctrl_info);
5405 rc = pqi_alloc_io_resources(ctrl_info);
5409 rc = pqi_alloc_operational_queues(ctrl_info);
5413 pqi_init_operational_queues(ctrl_info);
5415 rc = pqi_request_irqs(ctrl_info);
5419 pqi_irq_set_affinity_hint(ctrl_info);
5421 rc = pqi_create_queues(ctrl_info);
5425 sis_enable_msix(ctrl_info);
5427 rc = pqi_configure_events(ctrl_info);
5429 dev_err(&ctrl_info->pci_dev->dev,
5430 "error configuring events\n");
5434 pqi_start_heartbeat_timer(ctrl_info);
5436 ctrl_info->controller_online = true;
5438 /* Register with the SCSI subsystem. */
5439 rc = pqi_register_scsi(ctrl_info);
5443 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5445 dev_err(&ctrl_info->pci_dev->dev,
5446 "error obtaining firmware version\n");
5450 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5452 dev_err(&ctrl_info->pci_dev->dev,
5453 "error updating host wellness\n");
5457 pqi_schedule_update_time_worker(ctrl_info);
5459 pqi_scan_scsi_devices(ctrl_info);
5464 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5469 rc = pci_enable_device(ctrl_info->pci_dev);
5471 dev_err(&ctrl_info->pci_dev->dev,
5472 "failed to enable PCI device\n");
5476 if (sizeof(dma_addr_t) > 4)
5477 mask = DMA_BIT_MASK(64);
5478 else
5479 mask = DMA_BIT_MASK(32);
5481 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
5483 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5484 goto disable_device;
5487 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5489 dev_err(&ctrl_info->pci_dev->dev,
5490 "failed to obtain PCI resources\n");
5491 goto disable_device;
5494 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5495 ctrl_info->pci_dev, 0),
5496 sizeof(struct pqi_ctrl_registers));
5497 if (!ctrl_info->iomem_base) {
5498 dev_err(&ctrl_info->pci_dev->dev,
5499 "failed to map memory for controller registers\n");
5501 goto release_regions;
5504 ctrl_info->registers = ctrl_info->iomem_base;
5505 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5507 /* Enable bus mastering. */
5508 pci_set_master(ctrl_info->pci_dev);
5510 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5515 pci_release_regions(ctrl_info->pci_dev);
5517 pci_disable_device(ctrl_info->pci_dev);
5522 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5524 iounmap(ctrl_info->iomem_base);
5525 pci_release_regions(ctrl_info->pci_dev);
5526 pci_disable_device(ctrl_info->pci_dev);
5527 pci_set_drvdata(ctrl_info->pci_dev, NULL);
5530 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5532 struct pqi_ctrl_info *ctrl_info;
5534 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5535 GFP_KERNEL, numa_node);
5539 mutex_init(&ctrl_info->scan_mutex);
5541 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5542 spin_lock_init(&ctrl_info->scsi_device_list_lock);
5544 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5545 atomic_set(&ctrl_info->num_interrupts, 0);
5547 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5548 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5550 sema_init(&ctrl_info->sync_request_sem,
5551 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5552 sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5554 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5555 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5560 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5565 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5567 pqi_irq_unset_affinity_hint(ctrl_info);
5568 pqi_free_irqs(ctrl_info);
5569 if (ctrl_info->num_msix_vectors_enabled)
5570 pci_disable_msix(ctrl_info->pci_dev);
5573 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5575 pqi_stop_heartbeat_timer(ctrl_info);
5576 pqi_free_interrupts(ctrl_info);
5577 if (ctrl_info->queue_memory_base)
5578 dma_free_coherent(&ctrl_info->pci_dev->dev,
5579 ctrl_info->queue_memory_length,
5580 ctrl_info->queue_memory_base,
5581 ctrl_info->queue_memory_base_dma_handle);
5582 if (ctrl_info->admin_queue_memory_base)
5583 dma_free_coherent(&ctrl_info->pci_dev->dev,
5584 ctrl_info->admin_queue_memory_length,
5585 ctrl_info->admin_queue_memory_base,
5586 ctrl_info->admin_queue_memory_base_dma_handle);
5587 pqi_free_all_io_requests(ctrl_info);
5588 if (ctrl_info->error_buffer)
5589 dma_free_coherent(&ctrl_info->pci_dev->dev,
5590 ctrl_info->error_buffer_length,
5591 ctrl_info->error_buffer,
5592 ctrl_info->error_buffer_dma_handle);
5593 if (ctrl_info->iomem_base)
5594 pqi_cleanup_pci_init(ctrl_info);
5595 pqi_free_ctrl_info(ctrl_info);
5598 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5600 cancel_delayed_work_sync(&ctrl_info->rescan_work);
5601 cancel_delayed_work_sync(&ctrl_info->update_time_work);
5602 pqi_remove_all_scsi_devices(ctrl_info);
5603 pqi_unregister_scsi(ctrl_info);
5605 if (ctrl_info->pqi_mode_enabled) {
5606 sis_disable_msix(ctrl_info);
5607 if (pqi_reset(ctrl_info) == 0)
5608 sis_reenable_sis_mode(ctrl_info);
5610 pqi_free_ctrl_resources(ctrl_info);
5613 static void pqi_print_ctrl_info(struct pci_dev *pdev,
5614 const struct pci_device_id *id)
5616 char *ctrl_description;
5618 if (id->driver_data) {
5619 ctrl_description = (char *)id->driver_data;
5621 switch (id->subvendor) {
5622 case PCI_VENDOR_ID_HP:
5623 ctrl_description = hpe_branded_controller;
5625 case PCI_VENDOR_ID_ADAPTEC2:
5627 ctrl_description = microsemi_branded_controller;
5632 dev_info(&pdev->dev, "%s found\n", ctrl_description);
5635 static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5639 struct pqi_ctrl_info *ctrl_info;
5641 pqi_print_ctrl_info(pdev, id);
5643 if (pqi_disable_device_id_wildcards &&
5644 id->subvendor == PCI_ANY_ID &&
5645 id->subdevice == PCI_ANY_ID) {
5646 dev_warn(&pdev->dev,
5647 "controller not probed because device ID wildcards are disabled\n");
5651 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5652 dev_warn(&pdev->dev,
5653 "controller device ID matched using wildcards\n");
5655 node = dev_to_node(&pdev->dev);
5656 if (node == NUMA_NO_NODE)
5657 set_dev_node(&pdev->dev, 0);
5659 ctrl_info = pqi_alloc_ctrl_info(node);
5662 "failed to allocate controller info block\n");
5666 ctrl_info->pci_dev = pdev;
5668 rc = pqi_pci_init(ctrl_info);
5672 rc = pqi_ctrl_init(ctrl_info);
5679 pqi_remove_ctrl(ctrl_info);
5684 static void pqi_pci_remove(struct pci_dev *pdev)
5686 struct pqi_ctrl_info *ctrl_info;
5688 ctrl_info = pci_get_drvdata(pdev);
5692 pqi_remove_ctrl(ctrl_info);
5695 static void pqi_shutdown(struct pci_dev *pdev)
5698 struct pqi_ctrl_info *ctrl_info;
5700 ctrl_info = pci_get_drvdata(pdev);
5705 * Write all data in the controller's battery-backed cache to
5708 rc = pqi_flush_cache(ctrl_info);
5713 dev_warn(&pdev->dev,
5714 "unable to flush controller cache\n");
5717 /* Define the PCI IDs for the controllers that we support. */
5718 static const struct pci_device_id pqi_pci_id_table[] = {
5720 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5721 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5724 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5725 PCI_VENDOR_ID_HP, 0x0600)
5728 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5729 PCI_VENDOR_ID_HP, 0x0601)
5732 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5733 PCI_VENDOR_ID_HP, 0x0602)
5736 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5737 PCI_VENDOR_ID_HP, 0x0603)
5740 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5741 PCI_VENDOR_ID_HP, 0x0650)
5744 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5745 PCI_VENDOR_ID_HP, 0x0651)
5748 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5749 PCI_VENDOR_ID_HP, 0x0652)
5752 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5753 PCI_VENDOR_ID_HP, 0x0653)
5756 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5757 PCI_VENDOR_ID_HP, 0x0654)
5760 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5761 PCI_VENDOR_ID_HP, 0x0655)
5764 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5765 PCI_VENDOR_ID_HP, 0x0700)
5768 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5769 PCI_VENDOR_ID_HP, 0x0701)
5772 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5773 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5776 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5777 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5780 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5781 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5784 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5785 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5788 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5789 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5792 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5793 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5796 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5797 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5800 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5801 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5804 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5805 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5808 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5809 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5812 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5813 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5816 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5817 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5820 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5821 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5824 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5825 PCI_VENDOR_ID_HP, 0x1001)
5828 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5829 PCI_VENDOR_ID_HP, 0x1100)
5832 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5833 PCI_VENDOR_ID_HP, 0x1101)
5836 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5837 PCI_VENDOR_ID_HP, 0x1102)
5840 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5841 PCI_VENDOR_ID_HP, 0x1150)
5844 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5845 PCI_ANY_ID, PCI_ANY_ID)
5850 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
5852 static struct pci_driver pqi_pci_driver = {
5853 .name = DRIVER_NAME_SHORT,
5854 .id_table = pqi_pci_id_table,
5855 .probe = pqi_pci_probe,
5856 .remove = pqi_pci_remove,
5857 .shutdown = pqi_shutdown,
5860 static int __init pqi_init(void)
5864 pr_info(DRIVER_NAME "\n");
5866 pqi_sas_transport_template =
5867 sas_attach_transport(&pqi_sas_transport_functions);
5868 if (!pqi_sas_transport_template)
5871 rc = pci_register_driver(&pqi_pci_driver);
5873 sas_release_transport(pqi_sas_transport_template);
5878 static void __exit pqi_cleanup(void)
5880 pci_unregister_driver(&pqi_pci_driver);
5881 sas_release_transport(pqi_sas_transport_template);
5884 module_init(pqi_init);
5885 module_exit(pqi_cleanup);
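/*
 * verify_structures() is never called; it exists only so that the
 * BUILD_BUG_ON() checks below are compiled, turning the expected offsets
 * and sizes of the PQI/SIS on-the-wire structures into compile-time
 * assertions.
 */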
5887 static void __attribute__((unused)) verify_structures(void)
5889 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5890 sis_host_to_ctrl_doorbell) != 0x20);
5891 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5892 sis_interrupt_mask) != 0x34);
5893 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5894 sis_ctrl_to_host_doorbell) != 0x9c);
5895 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5896 sis_ctrl_to_host_doorbell_clear) != 0xa0);
5897 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5898 sis_driver_scratch) != 0xb0);
5899 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5900 sis_firmware_status) != 0xbc);
5901 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5902 sis_mailbox) != 0x1000);
5903 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5904 pqi_registers) != 0x4000);
5906 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5908 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5910 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5911 response_queue_id) != 0x4);
5912 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5914 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5916 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5918 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5919 service_response) != 0x1);
5920 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5921 data_present) != 0x2);
5922 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5924 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5925 residual_count) != 0x4);
5926 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5927 data_length) != 0x8);
5928 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5930 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5932 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5934 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5935 data_in_result) != 0x0);
5936 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5937 data_out_result) != 0x1);
5938 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5940 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5942 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5943 status_qualifier) != 0x6);
5944 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5945 sense_data_length) != 0x8);
5946 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5947 response_data_length) != 0xa);
5948 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5949 data_in_transferred) != 0xc);
5950 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5951 data_out_transferred) != 0x10);
5952 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5954 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5956 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5958 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5959 function_and_status_code) != 0x8);
5960 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5961 max_admin_iq_elements) != 0x10);
5962 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5963 max_admin_oq_elements) != 0x11);
5964 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5965 admin_iq_element_length) != 0x12);
5966 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5967 admin_oq_element_length) != 0x13);
5968 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5969 max_reset_timeout) != 0x14);
5970 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5971 legacy_intx_status) != 0x18);
5972 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5973 legacy_intx_mask_set) != 0x1c);
5974 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5975 legacy_intx_mask_clear) != 0x20);
5976 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5977 device_status) != 0x40);
5978 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5979 admin_iq_pi_offset) != 0x48);
5980 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5981 admin_oq_ci_offset) != 0x50);
5982 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5983 admin_iq_element_array_addr) != 0x58);
5984 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5985 admin_oq_element_array_addr) != 0x60);
5986 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5987 admin_iq_ci_addr) != 0x68);
5988 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5989 admin_oq_pi_addr) != 0x70);
5990 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5991 admin_iq_num_elements) != 0x78);
5992 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5993 admin_oq_num_elements) != 0x79);
5994 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5995 admin_queue_int_msg_num) != 0x7a);
5996 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5997 device_error) != 0x80);
5998 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5999 error_details) != 0x88);
6000 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6001 device_reset) != 0x90);
6002 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6003 power_action) != 0x94);
6004 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6006 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6007 header.iu_type) != 0);
6008 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6009 header.iu_length) != 2);
6010 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6011 header.work_area) != 6);
6012 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6014 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6015 function_code) != 10);
6016 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6017 data.report_device_capability.buffer_length) != 44);
6018 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6019 data.report_device_capability.sg_descriptor) != 48);
6020 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6021 data.create_operational_iq.queue_id) != 12);
6022 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6023 data.create_operational_iq.element_array_addr) != 16);
6024 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6025 data.create_operational_iq.ci_addr) != 24);
6026 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6027 data.create_operational_iq.num_elements) != 32);
6028 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6029 data.create_operational_iq.element_length) != 34);
6030 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6031 data.create_operational_iq.queue_protocol) != 36);
6032 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6033 data.create_operational_oq.queue_id) != 12);
6034 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6035 data.create_operational_oq.element_array_addr) != 16);
6036 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6037 data.create_operational_oq.pi_addr) != 24);
6038 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6039 data.create_operational_oq.num_elements) != 32);
6040 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6041 data.create_operational_oq.element_length) != 34);
6042 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6043 data.create_operational_oq.queue_protocol) != 36);
6044 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6045 data.create_operational_oq.int_msg_num) != 40);
6046 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6047 data.create_operational_oq.coalescing_count) != 42);
6048 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6049 data.create_operational_oq.min_coalescing_time) != 44);
6050 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6051 data.create_operational_oq.max_coalescing_time) != 48);
6052 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6053 data.delete_operational_queue.queue_id) != 12);
6054 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6055 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6056 data.create_operational_iq) != 64 - 11);
6057 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6058 data.create_operational_oq) != 64 - 11);
6059 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6060 data.delete_operational_queue) != 64 - 11);
6062 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6063 header.iu_type) != 0);
6064 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6065 header.iu_length) != 2);
6066 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6067 header.work_area) != 6);
6068 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6070 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6071 function_code) != 10);
6072 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6074 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6075 data.create_operational_iq.status_descriptor) != 12);
6076 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6077 data.create_operational_iq.iq_pi_offset) != 16);
6078 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6079 data.create_operational_oq.status_descriptor) != 12);
6080 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6081 data.create_operational_oq.oq_ci_offset) != 16);
6082 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6084 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6085 header.iu_type) != 0);
6086 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6087 header.iu_length) != 2);
6088 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6089 header.response_queue_id) != 4);
6090 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6091 header.work_area) != 6);
6092 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6094 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6096 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6097 buffer_length) != 12);
6098 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6100 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6101 protocol_specific) != 24);
6102 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6103 error_index) != 27);
6104 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6106 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6107 sg_descriptors) != 64);
6108 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6109 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6111 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6112 header.iu_type) != 0);
6113 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6114 header.iu_length) != 2);
6115 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6116 header.response_queue_id) != 4);
6117 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6118 header.work_area) != 6);
6119 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6121 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6123 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6124 buffer_length) != 16);
6125 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6126 data_encryption_key_index) != 22);
6127 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6128 encrypt_tweak_lower) != 24);
6129 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6130 encrypt_tweak_upper) != 28);
6131 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6133 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6134 error_index) != 48);
6135 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6136 num_sg_descriptors) != 50);
6137 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6139 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6141 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6142 sg_descriptors) != 64);
6143 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6144 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6146 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6147 header.iu_type) != 0);
6148 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6149 header.iu_length) != 2);
6150 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6152 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6153 error_index) != 10);
6155 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6156 header.iu_type) != 0);
6157 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6158 header.iu_length) != 2);
6159 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6160 header.response_queue_id) != 4);
6161 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6163 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6164 data.report_event_configuration.buffer_length) != 12);
6165 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6166 data.report_event_configuration.sg_descriptors) != 16);
6167 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6168 data.set_event_configuration.global_event_oq_id) != 10);
6169 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6170 data.set_event_configuration.buffer_length) != 12);
6171 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6172 data.set_event_configuration.sg_descriptors) != 16);
6174 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6175 max_inbound_iu_length) != 6);
6176 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6177 max_outbound_iu_length) != 14);
6178 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6180 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6182 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6183 iq_arbitration_priority_support_bitmask) != 8);
6184 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6185 maximum_aw_a) != 9);
6186 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6187 maximum_aw_b) != 10);
6188 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6189 maximum_aw_c) != 11);
6190 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6191 max_inbound_queues) != 16);
6192 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6193 max_elements_per_iq) != 18);
6194 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6195 max_iq_element_length) != 24);
6196 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6197 min_iq_element_length) != 26);
6198 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6199 max_outbound_queues) != 30);
6200 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6201 max_elements_per_oq) != 32);
6202 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6203 intr_coalescing_time_granularity) != 34);
6204 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6205 max_oq_element_length) != 36);
6206 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6207 min_oq_element_length) != 38);
6208 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6209 iu_layer_descriptors) != 64);
6210 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6212 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6214 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6216 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6218 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6219 num_event_descriptors) != 2);
6220 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6223 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6224 header.iu_type) != 0);
6225 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6226 header.iu_length) != 2);
6227 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6229 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6231 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6232 additional_event_id) != 12);
6233 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6235 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6237 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6238 header.iu_type) != 0);
6239 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6240 header.iu_length) != 2);
6241 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6243 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6245 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6246 additional_event_id) != 12);
6247 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6249 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6250 header.iu_type) != 0);
6251 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6252 header.iu_length) != 2);
6253 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6255 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6257 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6259 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6260 protocol_specific) != 24);
6261 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6262 outbound_queue_id_to_manage) != 26);
6263 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6264 request_id_to_manage) != 28);
6265 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6266 task_management_function) != 30);
6267 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6269 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6270 header.iu_type) != 0);
6271 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6272 header.iu_length) != 2);
6273 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6275 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6277 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6278 additional_response_info) != 12);
6279 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6280 response_code) != 15);
6281 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6283 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6284 configured_logical_drive_count) != 0);
6285 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6286 configuration_signature) != 1);
6287 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6288 firmware_version) != 5);
6289 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6290 extended_logical_unit_count) != 154);
6291 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6292 firmware_build_number) != 190);
6293 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6294 controller_mode) != 292);
6296 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6297 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6298 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6299 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6300 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6301 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6302 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6303 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6304 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6305 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6306 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6307 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6309 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);