/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016-2017 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
#define DRIVER_VERSION		"1.1.2-125"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		2
#define DRIVER_REVISION		125

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);
enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};
static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}
#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}
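/*
 * num_busy_threads counts threads currently submitting requests;
 * num_blocked_threads counts those parked in pqi_wait_if_ctrl_blocked().
 * The controller is treated as quiesced once every busy thread is also
 * blocked; see pqi_ctrl_wait_until_quiesced() below.
 */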
static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
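	/*
	 * The IU length covers everything after the common request header up
	 * through the first SG descriptor, hence the
	 * offsetof(..., sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH
	 * computation below.
	 */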
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}
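/*
 * I/O request slots are claimed lock-free: the pool is scanned starting at
 * next_io_request_slot, and the thread whose atomic_inc_return() on the
 * refcount returns 1 owns that slot.  next_io_request_slot is only a hint,
 * so the race on it merely costs an extra scan step ("benignly racy").
 */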
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}
static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
		sizeof(*flush_cache), 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(flush_cache);

	return rc;
}
static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#pragma pack(1)
struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()
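/*
 * The wellness buffers are framed with ASCII tags ("<HW>", then "DV" or
 * "TD", then "DW", then "ZZ") that the firmware parses; judging by the
 * field name, the "DW" tag marks a don't-write directive.
 */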
static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)
struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);
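	/*
	 * time[] layout, all BCD-encoded: hour, minute, second, an
	 * (apparently unused) fourth byte, month, day, century, and
	 * two-digit year.
	 */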
	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}
static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
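	/*
	 * The controller can report a longer list on the second read (e.g.
	 * if devices arrive between the two commands), so the list is
	 * re-read until the reported length stops growing.
	 */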
again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		(void **)buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	struct report_log_lun_extended **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG,
		(void **)buffer);
}
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}
static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, err_msg);

	return -EINVAL;
}
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */
static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}
static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}
static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}
static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}
static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}
/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}
static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};
static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}
#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s",
		scsi_device_type(device->devtype),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}
/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */
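	/*
	 * The update runs in phases: every currently known device is first
	 * marked gone, newly reported devices are matched against the list,
	 * and only after the lock is dropped are departed devices removed,
	 * queue depths adjusted, and new devices exposed to the SCSI ML.
	 */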
	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!device->sdev) {
			pqi_dev_info(ctrl_info, "added", device);
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}
static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, for
		 * example), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}
static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc(sizeof(*new_device_list) *
		num_new_devices, GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;
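	/*
	 * The first num_physicals entries of the combined list come from the
	 * physical LUN report; the remainder are logical volumes.
	 */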
	for (i = 0; i < num_new_devices; i++) {

		if (i < num_physicals) {
			is_physical_device = true;
			phys_lun_ext_entry = &physdev_list->lun_entries[i];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[i - num_physicals];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (!is_physical_device)
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx\n",
					get_unaligned_be64(
						&phys_lun_ext_entry->wwid));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		if (!pqi_is_supported_device(device))
			continue;

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle)
				device->aio_enabled = true;
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		switch (device->devtype) {
		case TYPE_DISK:
		case TYPE_ZBC:
		case TYPE_ENCLOSURE:
			if (device->is_physical_device) {
				device->sas_address =
					get_unaligned_be64(&device->wwid);
				if (device->devtype == TYPE_DISK ||
					device->devtype == TYPE_ZBC) {
					device->aio_handle =
						phys_lun_ext_entry->aio_handle;
					pqi_get_physical_disk_info(ctrl_info,
						device, id_phys);
				}
			}
			break;
		}

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}
static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;

	while (1) {
		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

		device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
			struct pqi_scsi_dev, scsi_device_list_entry);
		if (device)
			list_del(&device->scsi_device_list_entry);

		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);

		if (!device)
			break;

		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		pqi_free_device(device);
	}
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_lock(&ctrl_info->scan_mutex);

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc)
		pqi_schedule_rescan_worker_delayed(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}
static void pqi_scan_start(struct Scsi_Host *shost)
{
	pqi_scan_scsi_devices(shost_to_hba(shost));
}
/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}
static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->scan_mutex);
	mutex_unlock(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}
static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
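	/*
	 * Worked example (illustrative values, not from the spec): with a
	 * 4096-byte volume block size, LBA 100 yields a tweak of
	 * (100 * 4096) / 512 = 800, i.e. the tweak is always expressed in
	 * 512-byte units.
	 */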
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */

#define PQI_RAID_BYPASS_INELIGIBLE	1
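/*
 * The function below returns PQI_RAID_BYPASS_INELIGIBLE whenever the request
 * cannot be mapped directly to a single physical disk (unsupported opcode,
 * write to a non-RAID-0 volume, I/O spanning a row or column boundary,
 * etc.); the caller then falls back to the normal RAID I/O path.
 */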
static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct raid_map *raid_map;
	bool is_write = false;
	u32 map_index;
	u64 first_block;
	u64 last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row;
	u64 last_row;
	u32 first_row_offset;
	u32 last_row_offset;
	u32 first_column;
	u32 last_column;
	u64 r0_first_row;
	u64 r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row;
	u64 r5or6_last_row;
	u32 r5or6_first_row_offset;
	u32 r5or6_last_row_offset;
	u32 r5or6_first_column;
	u32 r5or6_last_column;
	u16 data_disks_per_row;
	u32 total_disks_per_row;
	u16 layout_map_count;
	u32 stripesize;
	u16 strip_size;
	u32 first_group;
	u32 last_group;
	u32 current_group;
	u32 map_row;
	u32 aio_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_length;
	int offload_to_mirror;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	/* Check for valid opcode, get LBA and block count. */
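	/*
	 * Per SCSI: READ/WRITE(6) carry a 21-bit LBA and an 8-bit count
	 * where 0 means 256 blocks; (10) a 32-bit LBA with a 16-bit count;
	 * (12) and (16) 32- and 64-bit LBAs with 32-bit counts.
	 */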
	switch (scmd->cmnd[0]) {
	case WRITE_6:
		is_write = true;
		/* fall through */
	case READ_6:
		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
		block_cnt = (u32)scmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = true;
		/* fall through */
	case READ_10:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case WRITE_12:
		is_write = true;
		/* fall through */
	case READ_12:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	case WRITE_16:
		is_write = true;
		/* fall through */
	case READ_16:
		first_block = get_unaligned_be64(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_RAID_BYPASS_INELIGIBLE;
	}

	/* Check for write to non-RAID-0. */
	if (is_write && device->raid_level != SA_RAID_0)
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (unlikely(block_cnt == 0))
		return PQI_RAID_BYPASS_INELIGIBLE;

	last_block = first_block + block_cnt - 1;
	raid_map = device->raid_map;

	/* Check for invalid block or wraparound. */
	if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
		last_block < first_block)
		return PQI_RAID_BYPASS_INELIGIBLE;

	data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
	strip_size = get_unaligned_le16(&raid_map->strip_size);
	layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);

	/* Calculate stripe information for the request. */
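	/*
	 * In volume-relative terms: row = LBA / blocks_per_row,
	 * offset-in-row = LBA % blocks_per_row, column = offset / strip_size.
	 * The 32-bit build uses do_div() because native 64-bit division is
	 * unavailable there.
	 */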
	blocks_per_row = data_disks_per_row * strip_size;
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* If this isn't a single row/column then give to the controller. */
	if (first_row != last_row || first_column != last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Proceeding with driver mapping. */
	total_disks_per_row = data_disks_per_row +
		get_unaligned_le16(&raid_map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;
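	/*
	 * RAID 1: alternate requests between the two mirror legs by toggling
	 * offload_to_mirror, spreading bypass reads across both drives.
	 */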
	if (device->raid_level == SA_RAID_1) {
		if (device->offload_to_mirror)
			map_index += data_disks_per_row;
		device->offload_to_mirror = !device->offload_to_mirror;
	} else if (device->raid_level == SA_RAID_ADM) {
		/* RAID ADM */
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
		 * divisible by 3.
		 */
		offload_to_mirror = device->offload_to_mirror;
		if (offload_to_mirror == 0) {
			/* use physical disk in the first mirrored group. */
			map_index %= data_disks_per_row;
		} else {
			do {
				/*
				 * Determine mirror group that map_index
				 * indicates.
				 */
				current_group = map_index / data_disks_per_row;

				if (offload_to_mirror != current_group) {
					if (current_group <
						layout_map_count - 1) {
						/*
						 * Select raid index from
						 * next group.
						 */
						map_index += data_disks_per_row;
						current_group++;
					} else {
						/*
						 * Select raid index from first
						 * group.
						 */
						map_index %= data_disks_per_row;
						current_group = 0;
					}
				}
			} while (offload_to_mirror != current_group);
		}

		/* Set mirror group to use next time. */
		offload_to_mirror =
			(offload_to_mirror >= layout_map_count - 1) ?
				0 : offload_to_mirror + 1;
		WARN_ON(offload_to_mirror >= layout_map_count);
		device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of device->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of device->layout_map_count -1.
		 */
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2258 /* Verify first and last block are in same RAID group */
2259 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2260 stripesize = r5or6_blocks_per_row * layout_map_count;
2261 #if BITS_PER_LONG == 32
2262 tmpdiv = first_block;
2263 first_group = do_div(tmpdiv, stripesize);
2264 tmpdiv = first_group;
2265 do_div(tmpdiv, r5or6_blocks_per_row);
2266 first_group = tmpdiv;
2267 tmpdiv = last_block;
2268 last_group = do_div(tmpdiv, stripesize);
2269 tmpdiv = last_group;
2270 do_div(tmpdiv, r5or6_blocks_per_row);
2271 last_group = tmpdiv;
2273 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2274 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2276 if (first_group != last_group)
2277 return PQI_RAID_BYPASS_INELIGIBLE;
2279 /* Verify request is in a single row of RAID 5/6 */
2280 #if BITS_PER_LONG == 32
2281 tmpdiv = first_block;
2282 do_div(tmpdiv, stripesize);
2283 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2284 tmpdiv = last_block;
2285 do_div(tmpdiv, stripesize);
2286 r5or6_last_row = r0_last_row = tmpdiv;
2288 first_row = r5or6_first_row = r0_first_row =
2289 first_block / stripesize;
2290 r5or6_last_row = r0_last_row = last_block / stripesize;
2292 if (r5or6_first_row != r5or6_last_row)
2293 return PQI_RAID_BYPASS_INELIGIBLE;
2295 /* Verify request is in a single column */
2296 #if BITS_PER_LONG == 32
2297 tmpdiv = first_block;
2298 first_row_offset = do_div(tmpdiv, stripesize);
2299 tmpdiv = first_row_offset;
2300 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2301 r5or6_first_row_offset = first_row_offset;
2302 tmpdiv = last_block;
2303 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2304 tmpdiv = r5or6_last_row_offset;
2305 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2306 tmpdiv = r5or6_first_row_offset;
2307 do_div(tmpdiv, strip_size);
2308 first_column = r5or6_first_column = tmpdiv;
2309 tmpdiv = r5or6_last_row_offset;
2310 do_div(tmpdiv, strip_size);
2311 r5or6_last_column = tmpdiv;
2312 #else
2313 first_row_offset = r5or6_first_row_offset =
2314 (u32)((first_block % stripesize) %
2315 r5or6_blocks_per_row);
2317 r5or6_last_row_offset =
2318 (u32)((last_block % stripesize) %
2319 r5or6_blocks_per_row);
2321 first_column = r5or6_first_row_offset / strip_size;
2322 r5or6_first_column = first_column;
2323 r5or6_last_column = r5or6_last_row_offset / strip_size;
2324 #endif
2325 if (r5or6_first_column != r5or6_last_column)
2326 return PQI_RAID_BYPASS_INELIGIBLE;
2328 /* Request is eligible */
2329 map_row =
2330 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2331 get_unaligned_le16(&raid_map->row_cnt);
2333 map_index = (first_group *
2334 (get_unaligned_le16(&raid_map->row_cnt) *
2335 total_disks_per_row)) +
2336 (map_row * total_disks_per_row) + first_column;
2337 }
2339 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2340 return PQI_RAID_BYPASS_INELIGIBLE;
2342 aio_handle = raid_map->disk_data[map_index].aio_handle;
2343 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2344 first_row * strip_size +
2345 (first_row_offset - first_column * strip_size);
2346 disk_block_cnt = block_cnt;
2348 /* Handle differing logical/physical block sizes. */
2349 if (raid_map->phys_blk_shift) {
2350 disk_block <<= raid_map->phys_blk_shift;
2351 disk_block_cnt <<= raid_map->phys_blk_shift;
2354 if (unlikely(disk_block_cnt > 0xffff))
2355 return PQI_RAID_BYPASS_INELIGIBLE;
2357 /* Build the new CDB for the physical disk I/O. */
2358 if (disk_block > 0xffffffff) {
2359 cdb[0] = is_write ? WRITE_16 : READ_16;
2360 cdb[1] = 0;
2361 put_unaligned_be64(disk_block, &cdb[2]);
2362 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2365 cdb_length = 16;
2366 } else {
2367 cdb[0] = is_write ? WRITE_10 : READ_10;
2368 cdb[1] = 0;
2369 put_unaligned_be32((u32)disk_block, &cdb[2]);
2371 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2373 cdb_length = 10;
2374 }
2376 if (get_unaligned_le16(&raid_map->flags) &
2377 RAID_MAP_ENCRYPTION_ENABLED) {
2378 pqi_set_encryption_info(&encryption_info, raid_map,
2379 first_block);
2380 encryption_info_ptr = &encryption_info;
2381 } else {
2382 encryption_info_ptr = NULL;
2383 }
2385 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2386 cdb, cdb_length, queue_group, encryption_info_ptr, true);
2387 }
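/*
 * Illustrative sketch (not part of the driver): the RAID 5/6 bypass
 * eligibility math above reduces to modular arithmetic on the volume
 * geometry. A standalone userspace program, with assumed example values,
 * mirroring the 64-bit (non-do_div) branch:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed example geometry, not read from a real controller. */
	uint64_t first_block = 123456;	/* first LBA of the request */
	uint32_t strip_size = 128;	/* blocks per strip */
	uint32_t data_disks_per_row = 4;
	uint32_t layout_map_count = 2;	/* RAID 50/60 style grouping */

	uint32_t blocks_per_row = strip_size * data_disks_per_row;
	uint64_t stripesize = (uint64_t)blocks_per_row * layout_map_count;

	uint32_t group = (uint32_t)((first_block % stripesize) /
		blocks_per_row);
	uint64_t row = first_block / stripesize;
	uint32_t row_offset = (uint32_t)((first_block % stripesize) %
		blocks_per_row);
	uint32_t column = row_offset / strip_size;

	printf("group=%u row=%llu offset=%u column=%u\n",
		group, (unsigned long long)row, row_offset, column);
	return 0;
}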
2389 #define PQI_STATUS_IDLE 0x0
2391 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2392 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2394 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2395 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2396 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2397 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2398 #define PQI_DEVICE_STATE_ERROR 0x4
2400 #define PQI_MODE_READY_TIMEOUT_SECS 30
2401 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2403 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2404 {
2405 struct pqi_device_registers __iomem *pqi_registers;
2406 unsigned long timeout;
2407 u64 signature;
2408 u8 status;
2410 pqi_registers = ctrl_info->pqi_registers;
2411 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2413 while (1) {
2414 signature = readq(&pqi_registers->signature);
2415 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2416 sizeof(signature)) == 0)
2417 break;
2418 if (time_after(jiffies, timeout)) {
2419 dev_err(&ctrl_info->pci_dev->dev,
2420 "timed out waiting for PQI signature\n");
2421 return -ETIMEDOUT;
2422 }
2423 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2424 }
2426 while (1) {
2427 status = readb(&pqi_registers->function_and_status_code);
2428 if (status == PQI_STATUS_IDLE)
2429 break;
2430 if (time_after(jiffies, timeout)) {
2431 dev_err(&ctrl_info->pci_dev->dev,
2432 "timed out waiting for PQI IDLE\n");
2433 return -ETIMEDOUT;
2434 }
2435 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2436 }
2438 while (1) {
2439 if (readl(&pqi_registers->device_status) ==
2440 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2441 break;
2442 if (time_after(jiffies, timeout)) {
2443 dev_err(&ctrl_info->pci_dev->dev,
2444 "timed out waiting for PQI all registers ready\n");
2445 return -ETIMEDOUT;
2446 }
2447 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2448 }
2450 return 0;
2451 }
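/*
 * The three loops above share one pattern: compute an absolute jiffies
 * deadline once, then poll with a fixed sleep until the condition holds
 * or the deadline passes. A minimal sketch of that pattern (hypothetical
 * helper, not used by the driver):
 */
static int __maybe_unused pqi_example_poll(bool (*ready)(void *), void *arg,
	unsigned int timeout_secs, unsigned int poll_msecs)
{
	unsigned long timeout = (timeout_secs * HZ) + jiffies;

	while (!ready(arg)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(poll_msecs);
	}

	return 0;
}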
2453 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2455 struct pqi_scsi_dev *device;
2457 device = io_request->scmd->device->hostdata;
2458 device->raid_bypass_enabled = false;
2459 device->aio_enabled = false;
2460 }
2462 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2464 struct pqi_ctrl_info *ctrl_info;
2465 struct pqi_scsi_dev *device;
2467 device = sdev->hostdata;
2468 if (device->device_offline)
2469 return;
2471 device->device_offline = true;
2472 scsi_device_set_state(sdev, SDEV_OFFLINE);
2473 ctrl_info = shost_to_hba(sdev->host);
2474 pqi_schedule_rescan_worker(ctrl_info);
2475 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2476 path, ctrl_info->scsi_host->host_no, device->bus,
2477 device->target, device->lun);
2478 }
2480 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2481 {
2482 u8 scsi_status;
2483 u8 host_byte;
2484 struct scsi_cmnd *scmd;
2485 struct pqi_raid_error_info *error_info;
2486 size_t sense_data_length;
2487 int residual_count;
2488 int xfer_count;
2489 struct scsi_sense_hdr sshdr;
2491 scmd = io_request->scmd;
2492 if (!scmd)
2493 return;
2495 error_info = io_request->error_info;
2496 scsi_status = error_info->status;
2497 host_byte = DID_OK;
2499 switch (error_info->data_out_result) {
2500 case PQI_DATA_IN_OUT_GOOD:
2501 break;
2502 case PQI_DATA_IN_OUT_UNDERFLOW:
2503 xfer_count =
2504 get_unaligned_le32(&error_info->data_out_transferred);
2505 residual_count = scsi_bufflen(scmd) - xfer_count;
2506 scsi_set_resid(scmd, residual_count);
2507 if (xfer_count < scmd->underflow)
2508 host_byte = DID_SOFT_ERROR;
2509 break;
2510 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2511 case PQI_DATA_IN_OUT_ABORTED:
2512 host_byte = DID_ABORT;
2513 break;
2514 case PQI_DATA_IN_OUT_TIMEOUT:
2515 host_byte = DID_TIME_OUT;
2516 break;
2517 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2518 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2519 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2520 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2521 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2522 case PQI_DATA_IN_OUT_ERROR:
2523 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2524 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2525 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2526 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2527 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2528 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2529 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2530 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2531 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2532 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2533 default:
2534 host_byte = DID_ERROR;
2535 break;
2536 }
2538 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2539 if (sense_data_length == 0)
2540 sense_data_length =
2541 get_unaligned_le16(&error_info->response_data_length);
2542 if (sense_data_length) {
2543 if (sense_data_length > sizeof(error_info->data))
2544 sense_data_length = sizeof(error_info->data);
2546 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2547 scsi_normalize_sense(error_info->data,
2548 sense_data_length, &sshdr) &&
2549 sshdr.sense_key == HARDWARE_ERROR &&
2550 sshdr.asc == 0x3e &&
2551 sshdr.ascq == 0x1) {
2552 pqi_take_device_offline(scmd->device, "RAID");
2553 host_byte = DID_NO_CONNECT;
2554 }
2556 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2557 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2558 memcpy(scmd->sense_buffer, error_info->data,
2559 sense_data_length);
2560 }
2562 scmd->result = scsi_status;
2563 set_host_byte(scmd, host_byte);
2564 }
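/*
 * Illustrative sketch (hypothetical helper, not driver code): how a raw
 * sense buffer maps onto scsi_sense_hdr for the 0x3e/0x01 ASC/ASCQ
 * combination ("logical unit failure") that the RAID-path handler above
 * treats as a dead device:
 */
static bool __maybe_unused pqi_example_is_lun_failure(const u8 *sense,
	int sense_len)
{
	struct scsi_sense_hdr sshdr;

	return scsi_normalize_sense(sense, sense_len, &sshdr) &&
		sshdr.sense_key == HARDWARE_ERROR &&
		sshdr.asc == 0x3e && sshdr.ascq == 0x1;
}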
2566 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2567 {
2568 u8 scsi_status;
2569 u8 host_byte;
2570 struct scsi_cmnd *scmd;
2571 struct pqi_aio_error_info *error_info;
2572 size_t sense_data_length;
2573 int residual_count;
2574 int xfer_count;
2575 bool device_offline;
2577 scmd = io_request->scmd;
2578 error_info = io_request->error_info;
2579 host_byte = DID_OK;
2580 sense_data_length = 0;
2581 device_offline = false;
2583 switch (error_info->service_response) {
2584 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2585 scsi_status = error_info->status;
2586 break;
2587 case PQI_AIO_SERV_RESPONSE_FAILURE:
2588 switch (error_info->status) {
2589 case PQI_AIO_STATUS_IO_ABORTED:
2590 scsi_status = SAM_STAT_TASK_ABORTED;
2591 break;
2592 case PQI_AIO_STATUS_UNDERRUN:
2593 scsi_status = SAM_STAT_GOOD;
2594 residual_count = get_unaligned_le32(
2595 &error_info->residual_count);
2596 scsi_set_resid(scmd, residual_count);
2597 xfer_count = scsi_bufflen(scmd) - residual_count;
2598 if (xfer_count < scmd->underflow)
2599 host_byte = DID_SOFT_ERROR;
2600 break;
2601 case PQI_AIO_STATUS_OVERRUN:
2602 scsi_status = SAM_STAT_GOOD;
2603 break;
2604 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2605 pqi_aio_path_disabled(io_request);
2606 scsi_status = SAM_STAT_GOOD;
2607 io_request->status = -EAGAIN;
2608 break;
2609 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2610 case PQI_AIO_STATUS_INVALID_DEVICE:
2611 if (!io_request->raid_bypass) {
2612 device_offline = true;
2613 pqi_take_device_offline(scmd->device, "AIO");
2614 host_byte = DID_NO_CONNECT;
2615 }
2616 scsi_status = SAM_STAT_CHECK_CONDITION;
2617 break;
2618 case PQI_AIO_STATUS_IO_ERROR:
2619 default:
2620 scsi_status = SAM_STAT_CHECK_CONDITION;
2621 break;
2622 }
2623 break;
2624 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2625 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2626 scsi_status = SAM_STAT_GOOD;
2627 break;
2628 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2629 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2630 default:
2631 scsi_status = SAM_STAT_CHECK_CONDITION;
2632 break;
2633 }
2635 if (error_info->data_present) {
2636 sense_data_length =
2637 get_unaligned_le16(&error_info->data_length);
2638 if (sense_data_length) {
2639 if (sense_data_length > sizeof(error_info->data))
2640 sense_data_length = sizeof(error_info->data);
2641 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2642 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2643 memcpy(scmd->sense_buffer, error_info->data,
2644 sense_data_length);
2645 }
2646 }
2648 if (device_offline && sense_data_length == 0)
2649 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2650 0x3e, 0x1);
2652 scmd->result = scsi_status;
2653 set_host_byte(scmd, host_byte);
2654 }
2656 static void pqi_process_io_error(unsigned int iu_type,
2657 struct pqi_io_request *io_request)
2658 {
2659 switch (iu_type) {
2660 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2661 pqi_process_raid_io_error(io_request);
2662 break;
2663 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2664 pqi_process_aio_io_error(io_request);
2665 break;
2666 }
2667 }
2669 static int pqi_interpret_task_management_response(
2670 struct pqi_task_management_response *response)
2671 {
2674 switch (response->response_code) {
2675 case SOP_TMF_COMPLETE:
2676 case SOP_TMF_FUNCTION_SUCCEEDED:
2677 return 0;
2679 default:
2680 return -EIO;
2681 }
2682 }
2687 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2688 struct pqi_queue_group *queue_group)
2690 unsigned int num_responses;
2693 struct pqi_io_request *io_request;
2694 struct pqi_io_response *response;
2698 oq_ci = queue_group->oq_ci_copy;
2701 oq_pi = *queue_group->oq_pi;
2706 response = queue_group->oq_element_array +
2707 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2709 request_id = get_unaligned_le16(&response->request_id);
2710 WARN_ON(request_id >= ctrl_info->max_io_slots);
2712 io_request = &ctrl_info->io_request_pool[request_id];
2713 WARN_ON(atomic_read(&io_request->refcount) == 0);
2715 switch (response->header.iu_type) {
2716 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2717 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2718 if (io_request->scmd)
2719 io_request->scmd->result = 0;
2721 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2723 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2724 io_request->status =
2725 pqi_interpret_task_management_response(
2728 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2729 pqi_aio_path_disabled(io_request);
2730 io_request->status = -EAGAIN;
2732 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2733 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2734 io_request->error_info = ctrl_info->error_buffer +
2735 (get_unaligned_le16(&response->error_index) *
2736 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2737 pqi_process_io_error(response->header.iu_type,
2741 dev_err(&ctrl_info->pci_dev->dev,
2742 "unexpected IU type: 0x%x\n",
2743 response->header.iu_type);
2747 io_request->io_complete_callback(io_request,
2748 io_request->context);
2751 * Note that the I/O request structure CANNOT BE TOUCHED after
2752 * returning from the I/O completion callback!
2755 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2756 }
2758 if (num_responses) {
2759 queue_group->oq_ci_copy = oq_ci;
2760 writel(oq_ci, queue_group->oq_ci);
2763 return num_responses;
2764 }
2766 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2767 unsigned int ci, unsigned int elements_in_queue)
2769 unsigned int num_elements_used;
2771 if (pi >= ci)
2772 num_elements_used = pi - ci;
2773 else
2774 num_elements_used = elements_in_queue - ci + pi;
2776 return elements_in_queue - num_elements_used - 1;
2777 }
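/*
 * Worked example (illustrative): with elements_in_queue = 8, pi = 2 and
 * ci = 6, the producer has wrapped, so num_elements_used = 8 - 6 + 2 = 4
 * and 8 - 4 - 1 = 3 elements are free. One element is always kept unused
 * so that a full queue (pi one behind ci) remains distinguishable from an
 * empty one (pi == ci).
 */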
2779 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
2780 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2784 unsigned long flags;
2786 struct pqi_queue_group *queue_group;
2788 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2789 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2792 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2794 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2795 iq_ci = *queue_group->iq_ci[RAID_PATH];
2797 if (pqi_num_elements_free(iq_pi, iq_ci,
2798 ctrl_info->num_elements_per_iq))
2801 spin_unlock_irqrestore(
2802 &queue_group->submit_lock[RAID_PATH], flags);
2804 if (pqi_ctrl_offline(ctrl_info))
2808 next_element = queue_group->iq_element_array[RAID_PATH] +
2809 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2811 memcpy(next_element, iu, iu_length);
2813 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2814 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2817 * This write notifies the controller that an IU is available to be
2820 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2822 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2823 }
2825 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2826 struct pqi_event *event)
2828 struct pqi_event_acknowledge_request request;
2830 memset(&request, 0, sizeof(request));
2832 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2833 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2834 &request.header.iu_length);
2835 request.event_type = event->event_type;
2836 request.event_id = event->event_id;
2837 request.additional_event_id = event->additional_event_id;
2839 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
2840 }
2842 static void pqi_event_worker(struct work_struct *work)
2845 struct pqi_ctrl_info *ctrl_info;
2846 struct pqi_event *event;
2848 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2850 pqi_ctrl_busy(ctrl_info);
2851 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2852 if (pqi_ctrl_offline(ctrl_info))
2855 pqi_schedule_rescan_worker_delayed(ctrl_info);
2857 event = ctrl_info->events;
2858 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2859 if (event->pending) {
2860 event->pending = false;
2861 pqi_acknowledge_event(ctrl_info, event);
2867 pqi_ctrl_unbusy(ctrl_info);
2868 }
2870 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
2872 static void pqi_heartbeat_timer_handler(unsigned long data)
2875 u32 heartbeat_count;
2876 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2878 pqi_check_ctrl_health(ctrl_info);
2879 if (pqi_ctrl_offline(ctrl_info))
2882 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2883 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
2885 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2886 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2887 dev_err(&ctrl_info->pci_dev->dev,
2888 "no heartbeat detected - last heartbeat count: %u\n",
2890 pqi_take_ctrl_offline(ctrl_info);
2894 ctrl_info->previous_num_interrupts = num_interrupts;
2897 ctrl_info->previous_heartbeat_count = heartbeat_count;
2898 mod_timer(&ctrl_info->heartbeat_timer,
2899 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2900 }
2902 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2904 if (!ctrl_info->heartbeat_counter)
2907 ctrl_info->previous_num_interrupts =
2908 atomic_read(&ctrl_info->num_interrupts);
2909 ctrl_info->previous_heartbeat_count =
2910 pqi_read_heartbeat_counter(ctrl_info);
2912 ctrl_info->heartbeat_timer.expires =
2913 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2914 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2915 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2916 add_timer(&ctrl_info->heartbeat_timer);
2917 }
2919 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2920 {
2921 del_timer_sync(&ctrl_info->heartbeat_timer);
2922 }
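/*
 * Illustrative sketch of the pre-timer_setup() timer idiom used by the
 * heartbeat code above: kernels of this vintage pass the context as an
 * unsigned long in timer->data (newer kernels would use timer_setup()).
 * Hypothetical example, not part of the driver:
 */
static void __maybe_unused pqi_example_timer_fn(unsigned long data)
{
	struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;

	/* Do the periodic work, then re-arm for the next interval. */
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}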
2924 static inline int pqi_event_type_to_event_index(unsigned int event_type)
2925 {
2926 int index;
2928 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2929 if (event_type == pqi_supported_event_types[index])
2930 return index;
2932 return -1;
2933 }
2935 static inline bool pqi_is_supported_event(unsigned int event_type)
2936 {
2937 return pqi_event_type_to_event_index(event_type) != -1;
2938 }
2940 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2942 unsigned int num_events;
2945 struct pqi_event_queue *event_queue;
2946 struct pqi_event_response *response;
2947 struct pqi_event *event;
2950 event_queue = &ctrl_info->event_queue;
2952 oq_ci = event_queue->oq_ci_copy;
2955 oq_pi = *event_queue->oq_pi;
2960 response = event_queue->oq_element_array +
2961 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2964 pqi_event_type_to_event_index(response->event_type);
2966 if (event_index >= 0) {
2967 if (response->request_acknowlege) {
2968 event = &ctrl_info->events[event_index];
2969 event->pending = true;
2970 event->event_type = response->event_type;
2971 event->event_id = response->event_id;
2972 event->additional_event_id =
2973 response->additional_event_id;
2977 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2978 }
2980 if (num_events) {
2981 event_queue->oq_ci_copy = oq_ci;
2982 writel(oq_ci, event_queue->oq_ci);
2983 schedule_work(&ctrl_info->event_work);
2984 }
2986 return num_events;
2987 }
2989 #define PQI_LEGACY_INTX_MASK 0x1
2991 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2995 struct pqi_device_registers __iomem *pqi_registers;
2996 volatile void __iomem *register_addr;
2998 pqi_registers = ctrl_info->pqi_registers;
3001 register_addr = &pqi_registers->legacy_intx_mask_clear;
3003 register_addr = &pqi_registers->legacy_intx_mask_set;
3005 intx_mask = readl(register_addr);
3006 intx_mask |= PQI_LEGACY_INTX_MASK;
3007 writel(intx_mask, register_addr);
3010 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3011 enum pqi_irq_mode new_mode)
3013 switch (ctrl_info->irq_mode) {
3019 pqi_configure_legacy_intx(ctrl_info, true);
3020 sis_enable_intx(ctrl_info);
3029 pqi_configure_legacy_intx(ctrl_info, false);
3030 sis_enable_msix(ctrl_info);
3035 pqi_configure_legacy_intx(ctrl_info, false);
3042 sis_enable_msix(ctrl_info);
3045 pqi_configure_legacy_intx(ctrl_info, true);
3046 sis_enable_intx(ctrl_info);
3054 ctrl_info->irq_mode = new_mode;
3057 #define PQI_LEGACY_INTX_PENDING 0x1
3059 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3064 switch (ctrl_info->irq_mode) {
3069 intx_status =
3070 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3071 if (intx_status & PQI_LEGACY_INTX_PENDING)
3085 static irqreturn_t pqi_irq_handler(int irq, void *data)
3087 struct pqi_ctrl_info *ctrl_info;
3088 struct pqi_queue_group *queue_group;
3089 unsigned int num_responses_handled;
3091 queue_group = data;
3092 ctrl_info = queue_group->ctrl_info;
3094 if (!pqi_is_valid_irq(ctrl_info))
3095 return IRQ_NONE;
3097 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3099 if (irq == ctrl_info->event_irq)
3100 num_responses_handled += pqi_process_event_intr(ctrl_info);
3102 if (num_responses_handled)
3103 atomic_inc(&ctrl_info->num_interrupts);
3105 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3106 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3108 return IRQ_HANDLED;
3109 }
3111 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3113 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3117 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3119 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3120 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3121 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3123 dev_err(&pci_dev->dev,
3124 "irq %u init failed with error %d\n",
3125 pci_irq_vector(pci_dev, i), rc);
3128 ctrl_info->num_msix_vectors_initialized++;
3134 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3138 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3139 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3140 &ctrl_info->queue_groups[i]);
3142 ctrl_info->num_msix_vectors_initialized = 0;
3145 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3147 int num_vectors_enabled;
3149 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3150 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3151 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3152 if (num_vectors_enabled < 0) {
3153 dev_err(&ctrl_info->pci_dev->dev,
3154 "MSI-X init failed with error %d\n",
3155 num_vectors_enabled);
3156 return num_vectors_enabled;
3159 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3160 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3164 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3166 if (ctrl_info->num_msix_vectors_enabled) {
3167 pci_free_irq_vectors(ctrl_info->pci_dev);
3168 ctrl_info->num_msix_vectors_enabled = 0;
3172 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3175 size_t alloc_length;
3176 size_t element_array_length_per_iq;
3177 size_t element_array_length_per_oq;
3178 void *element_array;
3179 void *next_queue_index;
3180 void *aligned_pointer;
3181 unsigned int num_inbound_queues;
3182 unsigned int num_outbound_queues;
3183 unsigned int num_queue_indexes;
3184 struct pqi_queue_group *queue_group;
3186 element_array_length_per_iq =
3187 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3188 ctrl_info->num_elements_per_iq;
3189 element_array_length_per_oq =
3190 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3191 ctrl_info->num_elements_per_oq;
3192 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3193 num_outbound_queues = ctrl_info->num_queue_groups;
3194 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3196 aligned_pointer = NULL;
3198 for (i = 0; i < num_inbound_queues; i++) {
3199 aligned_pointer = PTR_ALIGN(aligned_pointer,
3200 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3201 aligned_pointer += element_array_length_per_iq;
3204 for (i = 0; i < num_outbound_queues; i++) {
3205 aligned_pointer = PTR_ALIGN(aligned_pointer,
3206 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3207 aligned_pointer += element_array_length_per_oq;
3210 aligned_pointer = PTR_ALIGN(aligned_pointer,
3211 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3212 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3213 PQI_EVENT_OQ_ELEMENT_LENGTH;
3215 for (i = 0; i < num_queue_indexes; i++) {
3216 aligned_pointer = PTR_ALIGN(aligned_pointer,
3217 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3218 aligned_pointer += sizeof(pqi_index_t);
3221 alloc_length = (size_t)aligned_pointer +
3222 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3224 alloc_length += PQI_EXTRA_SGL_MEMORY;
3226 ctrl_info->queue_memory_base =
3227 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3228 alloc_length,
3229 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3231 if (!ctrl_info->queue_memory_base)
3234 ctrl_info->queue_memory_length = alloc_length;
3236 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3237 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3239 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3240 queue_group = &ctrl_info->queue_groups[i];
3241 queue_group->iq_element_array[RAID_PATH] = element_array;
3242 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3243 ctrl_info->queue_memory_base_dma_handle +
3244 (element_array - ctrl_info->queue_memory_base);
3245 element_array += element_array_length_per_iq;
3246 element_array = PTR_ALIGN(element_array,
3247 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3248 queue_group->iq_element_array[AIO_PATH] = element_array;
3249 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3250 ctrl_info->queue_memory_base_dma_handle +
3251 (element_array - ctrl_info->queue_memory_base);
3252 element_array += element_array_length_per_iq;
3253 element_array = PTR_ALIGN(element_array,
3254 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3257 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3258 queue_group = &ctrl_info->queue_groups[i];
3259 queue_group->oq_element_array = element_array;
3260 queue_group->oq_element_array_bus_addr =
3261 ctrl_info->queue_memory_base_dma_handle +
3262 (element_array - ctrl_info->queue_memory_base);
3263 element_array += element_array_length_per_oq;
3264 element_array = PTR_ALIGN(element_array,
3265 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3268 ctrl_info->event_queue.oq_element_array = element_array;
3269 ctrl_info->event_queue.oq_element_array_bus_addr =
3270 ctrl_info->queue_memory_base_dma_handle +
3271 (element_array - ctrl_info->queue_memory_base);
3272 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3273 PQI_EVENT_OQ_ELEMENT_LENGTH;
3275 next_queue_index = PTR_ALIGN(element_array,
3276 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3278 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3279 queue_group = &ctrl_info->queue_groups[i];
3280 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3281 queue_group->iq_ci_bus_addr[RAID_PATH] =
3282 ctrl_info->queue_memory_base_dma_handle +
3283 (next_queue_index - ctrl_info->queue_memory_base);
3284 next_queue_index += sizeof(pqi_index_t);
3285 next_queue_index = PTR_ALIGN(next_queue_index,
3286 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3287 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3288 queue_group->iq_ci_bus_addr[AIO_PATH] =
3289 ctrl_info->queue_memory_base_dma_handle +
3290 (next_queue_index - ctrl_info->queue_memory_base);
3291 next_queue_index += sizeof(pqi_index_t);
3292 next_queue_index = PTR_ALIGN(next_queue_index,
3293 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3294 queue_group->oq_pi = next_queue_index;
3295 queue_group->oq_pi_bus_addr =
3296 ctrl_info->queue_memory_base_dma_handle +
3297 (next_queue_index - ctrl_info->queue_memory_base);
3298 next_queue_index += sizeof(pqi_index_t);
3299 next_queue_index = PTR_ALIGN(next_queue_index,
3300 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3303 ctrl_info->event_queue.oq_pi = next_queue_index;
3304 ctrl_info->event_queue.oq_pi_bus_addr =
3305 ctrl_info->queue_memory_base_dma_handle +
3306 (next_queue_index - ctrl_info->queue_memory_base);
3308 return 0;
3309 }
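/*
 * The allocation above is sized by "dry running" the layout: the same
 * PTR_ALIGN()/advance walk is performed twice, first over a NULL cursor
 * to compute a worst-case length, then over the real coherent buffer. A
 * minimal sketch of the sizing pass (hypothetical helper; pointer
 * arithmetic on void * is a GCC extension the driver already relies on):
 */
static size_t __maybe_unused pqi_example_sizing_pass(size_t region_length,
	size_t alignment, unsigned int num_regions)
{
	void *cursor = NULL;
	unsigned int i;

	for (i = 0; i < num_regions; i++) {
		cursor = PTR_ALIGN(cursor, alignment);
		cursor += region_length;
	}

	/* One extra alignment's worth covers aligning the real base. */
	return (size_t)cursor + alignment;
}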
3311 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3314 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3315 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3318 * Initialize the backpointers to the controller structure in
3319 * each operational queue group structure.
3321 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3322 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3325 * Assign IDs to all operational queues. Note that the IDs
3326 * assigned to operational IQs are independent of the IDs
3327 * assigned to operational OQs.
3329 ctrl_info->event_queue.oq_id = next_oq_id++;
3330 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3331 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3332 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3333 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3337 * Assign MSI-X table entry indexes to all queues. Note that the
3338 * interrupt for the event queue is shared with the first queue group.
3340 ctrl_info->event_queue.int_msg_num = 0;
3341 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3342 ctrl_info->queue_groups[i].int_msg_num = i;
3344 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3345 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3346 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3347 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3348 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3352 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3354 size_t alloc_length;
3355 struct pqi_admin_queues_aligned *admin_queues_aligned;
3356 struct pqi_admin_queues *admin_queues;
3358 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3359 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3361 ctrl_info->admin_queue_memory_base =
3362 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3363 alloc_length,
3364 &ctrl_info->admin_queue_memory_base_dma_handle,
3367 if (!ctrl_info->admin_queue_memory_base)
3370 ctrl_info->admin_queue_memory_length = alloc_length;
3372 admin_queues = &ctrl_info->admin_queues;
3373 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3374 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3375 admin_queues->iq_element_array =
3376 &admin_queues_aligned->iq_element_array;
3377 admin_queues->oq_element_array =
3378 &admin_queues_aligned->oq_element_array;
3379 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3380 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3382 admin_queues->iq_element_array_bus_addr =
3383 ctrl_info->admin_queue_memory_base_dma_handle +
3384 (admin_queues->iq_element_array -
3385 ctrl_info->admin_queue_memory_base);
3386 admin_queues->oq_element_array_bus_addr =
3387 ctrl_info->admin_queue_memory_base_dma_handle +
3388 (admin_queues->oq_element_array -
3389 ctrl_info->admin_queue_memory_base);
3390 admin_queues->iq_ci_bus_addr =
3391 ctrl_info->admin_queue_memory_base_dma_handle +
3392 ((void *)admin_queues->iq_ci -
3393 ctrl_info->admin_queue_memory_base);
3394 admin_queues->oq_pi_bus_addr =
3395 ctrl_info->admin_queue_memory_base_dma_handle +
3396 ((void *)admin_queues->oq_pi -
3397 ctrl_info->admin_queue_memory_base);
3402 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3403 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3405 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3407 struct pqi_device_registers __iomem *pqi_registers;
3408 struct pqi_admin_queues *admin_queues;
3409 unsigned long timeout;
3413 pqi_registers = ctrl_info->pqi_registers;
3414 admin_queues = &ctrl_info->admin_queues;
3416 writeq((u64)admin_queues->iq_element_array_bus_addr,
3417 &pqi_registers->admin_iq_element_array_addr);
3418 writeq((u64)admin_queues->oq_element_array_bus_addr,
3419 &pqi_registers->admin_oq_element_array_addr);
3420 writeq((u64)admin_queues->iq_ci_bus_addr,
3421 &pqi_registers->admin_iq_ci_addr);
3422 writeq((u64)admin_queues->oq_pi_bus_addr,
3423 &pqi_registers->admin_oq_pi_addr);
3425 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3426 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3427 (admin_queues->int_msg_num << 16);
3428 writel(reg, &pqi_registers->admin_iq_num_elements);
3429 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3430 &pqi_registers->function_and_status_code);
3432 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3434 status = readb(&pqi_registers->function_and_status_code);
3435 if (status == PQI_STATUS_IDLE)
3437 if (time_after(jiffies, timeout))
3439 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3443 * The offset registers are not initialized to the correct
3444 * offsets until *after* the create admin queue pair command
3445 * completes successfully.
3447 admin_queues->iq_pi = ctrl_info->iomem_base +
3448 PQI_DEVICE_REGISTERS_OFFSET +
3449 readq(&pqi_registers->admin_iq_pi_offset);
3450 admin_queues->oq_ci = ctrl_info->iomem_base +
3451 PQI_DEVICE_REGISTERS_OFFSET +
3452 readq(&pqi_registers->admin_oq_ci_offset);
3454 return 0;
3455 }
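/*
 * Worked example of the admin_iq_num_elements encoding above
 * (illustrative, assuming 8 IQ elements, 20 OQ elements and MSI-X
 * message 0): bits 7:0 hold the IQ element count, bits 15:8 the OQ
 * element count, and bits 23:16 the interrupt message number, giving
 * 8 | (20 << 8) | (0 << 16) = 0x00001408.
 */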
3457 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3458 struct pqi_general_admin_request *request)
3460 struct pqi_admin_queues *admin_queues;
3464 admin_queues = &ctrl_info->admin_queues;
3465 iq_pi = admin_queues->iq_pi_copy;
3467 next_element = admin_queues->iq_element_array +
3468 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3470 memcpy(next_element, request, sizeof(*request));
3472 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3473 admin_queues->iq_pi_copy = iq_pi;
3476 * This write notifies the controller that an IU is available to be
3479 writel(iq_pi, admin_queues->iq_pi);
3482 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3484 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3485 struct pqi_general_admin_response *response)
3487 struct pqi_admin_queues *admin_queues;
3490 unsigned long timeout;
3492 admin_queues = &ctrl_info->admin_queues;
3493 oq_ci = admin_queues->oq_ci_copy;
3495 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
3498 oq_pi = *admin_queues->oq_pi;
3501 if (time_after(jiffies, timeout)) {
3502 dev_err(&ctrl_info->pci_dev->dev,
3503 "timed out waiting for admin response\n");
3506 if (!sis_is_firmware_running(ctrl_info))
3508 usleep_range(1000, 2000);
3511 memcpy(response, admin_queues->oq_element_array +
3512 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3514 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3515 admin_queues->oq_ci_copy = oq_ci;
3516 writel(oq_ci, admin_queues->oq_ci);
3521 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3522 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3523 struct pqi_io_request *io_request)
3525 struct pqi_io_request *next;
3530 unsigned long flags;
3531 unsigned int num_elements_needed;
3532 unsigned int num_elements_to_end_of_queue;
3534 struct pqi_iu_header *request;
3536 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3539 io_request->queue_group = queue_group;
3540 list_add_tail(&io_request->request_list_entry,
3541 &queue_group->request_list[path]);
3544 iq_pi = queue_group->iq_pi_copy[path];
3546 list_for_each_entry_safe(io_request, next,
3547 &queue_group->request_list[path], request_list_entry) {
3549 request = io_request->iu;
3551 iu_length = get_unaligned_le16(&request->iu_length) +
3552 PQI_REQUEST_HEADER_LENGTH;
3553 num_elements_needed =
3554 DIV_ROUND_UP(iu_length,
3555 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3557 iq_ci = *queue_group->iq_ci[path];
3559 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3560 ctrl_info->num_elements_per_iq))
3563 put_unaligned_le16(queue_group->oq_id,
3564 &request->response_queue_id);
3566 next_element = queue_group->iq_element_array[path] +
3567 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3569 num_elements_to_end_of_queue =
3570 ctrl_info->num_elements_per_iq - iq_pi;
3572 if (num_elements_needed <= num_elements_to_end_of_queue) {
3573 memcpy(next_element, request, iu_length);
3575 copy_count = num_elements_to_end_of_queue *
3576 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3577 memcpy(next_element, request, copy_count);
3578 memcpy(queue_group->iq_element_array[path],
3579 (u8 *)request + copy_count,
3580 iu_length - copy_count);
3583 iq_pi = (iq_pi + num_elements_needed) %
3584 ctrl_info->num_elements_per_iq;
3586 list_del(&io_request->request_list_entry);
3589 if (iq_pi != queue_group->iq_pi_copy[path]) {
3590 queue_group->iq_pi_copy[path] = iq_pi;
3592 * This write notifies the controller that one or more IUs are
3593 * available to be processed.
3595 writel(iq_pi, queue_group->iq_pi[path]);
3598 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3599 }
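/*
 * Illustrative sketch (hypothetical helper, not driver code) of the
 * wrap-around copy above: when a spanned IU does not fit in the elements
 * remaining before the end of the ring, its tail is copied to the start
 * of the element array.
 */
static void __maybe_unused pqi_example_ring_copy(u8 *ring,
	unsigned int ring_length, unsigned int offset, const u8 *src,
	unsigned int length)
{
	unsigned int bytes_to_end = ring_length - offset;

	if (length <= bytes_to_end) {
		memcpy(ring + offset, src, length);
	} else {
		memcpy(ring + offset, src, bytes_to_end);
		memcpy(ring, src + bytes_to_end, length - bytes_to_end);
	}
}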
3601 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3603 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3604 struct completion *wait)
3609 if (wait_for_completion_io_timeout(wait,
3610 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3615 pqi_check_ctrl_health(ctrl_info);
3616 if (pqi_ctrl_offline(ctrl_info)) {
3625 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3628 struct completion *waiting = context;
3633 static int pqi_submit_raid_request_synchronous_with_io_request(
3634 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3635 unsigned long timeout_msecs)
3638 DECLARE_COMPLETION_ONSTACK(wait);
3640 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3641 io_request->context = &wait;
3643 pqi_start_io(ctrl_info,
3644 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3647 if (timeout_msecs == NO_TIMEOUT) {
3648 pqi_wait_for_completion_io(ctrl_info, &wait);
3650 if (!wait_for_completion_io_timeout(&wait,
3651 msecs_to_jiffies(timeout_msecs))) {
3652 dev_warn(&ctrl_info->pci_dev->dev,
3653 "command timed out\n");
3661 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3662 struct pqi_iu_header *request, unsigned int flags,
3663 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3666 struct pqi_io_request *io_request;
3667 unsigned long start_jiffies;
3668 unsigned long msecs_blocked;
3672 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3673 * are mutually exclusive.
3676 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3677 if (down_interruptible(&ctrl_info->sync_request_sem))
3678 return -ERESTARTSYS;
3680 if (timeout_msecs == NO_TIMEOUT) {
3681 down(&ctrl_info->sync_request_sem);
3683 start_jiffies = jiffies;
3684 if (down_timeout(&ctrl_info->sync_request_sem,
3685 msecs_to_jiffies(timeout_msecs)))
3688 jiffies_to_msecs(jiffies - start_jiffies);
3689 if (msecs_blocked >= timeout_msecs) {
3693 timeout_msecs -= msecs_blocked;
3697 pqi_ctrl_busy(ctrl_info);
3698 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3699 if (timeout_msecs == 0) {
3704 if (pqi_ctrl_offline(ctrl_info)) {
3709 io_request = pqi_alloc_io_request(ctrl_info);
3711 put_unaligned_le16(io_request->index,
3712 &(((struct pqi_raid_path_request *)request)->request_id));
3714 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3715 ((struct pqi_raid_path_request *)request)->error_index =
3716 ((struct pqi_raid_path_request *)request)->request_id;
3718 iu_length = get_unaligned_le16(&request->iu_length) +
3719 PQI_REQUEST_HEADER_LENGTH;
3720 memcpy(io_request->iu, request, iu_length);
3722 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3723 io_request, timeout_msecs);
3726 if (io_request->error_info)
3727 memcpy(error_info, io_request->error_info,
3728 sizeof(*error_info));
3730 memset(error_info, 0, sizeof(*error_info));
3731 } else if (rc == 0 && io_request->error_info) {
3733 struct pqi_raid_error_info *raid_error_info;
3735 raid_error_info = io_request->error_info;
3736 scsi_status = raid_error_info->status;
3738 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3739 raid_error_info->data_out_result ==
3740 PQI_DATA_IN_OUT_UNDERFLOW)
3741 scsi_status = SAM_STAT_GOOD;
3743 if (scsi_status != SAM_STAT_GOOD)
3747 pqi_free_io_request(io_request);
3750 pqi_ctrl_unbusy(ctrl_info);
3751 up(&ctrl_info->sync_request_sem);
3753 return rc;
3754 }
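/*
 * Illustrative sketch (hypothetical helper, not driver code) of the
 * timeout bookkeeping above: time spent blocked on the semaphore is
 * charged against the caller's remaining budget before the request is
 * actually issued.
 */
static int __maybe_unused pqi_example_down_with_budget(struct semaphore *sem,
	unsigned long *timeout_msecs)
{
	unsigned long start_jiffies = jiffies;
	unsigned long msecs_blocked;

	if (down_timeout(sem, msecs_to_jiffies(*timeout_msecs)))
		return -ETIMEDOUT;

	msecs_blocked = jiffies_to_msecs(jiffies - start_jiffies);
	if (msecs_blocked >= *timeout_msecs) {
		up(sem);
		return -ETIMEDOUT;
	}
	*timeout_msecs -= msecs_blocked;

	return 0;
}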
3756 static int pqi_validate_admin_response(
3757 struct pqi_general_admin_response *response, u8 expected_function_code)
3759 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3762 if (get_unaligned_le16(&response->header.iu_length) !=
3763 PQI_GENERAL_ADMIN_IU_LENGTH)
3766 if (response->function_code != expected_function_code)
3769 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3775 static int pqi_submit_admin_request_synchronous(
3776 struct pqi_ctrl_info *ctrl_info,
3777 struct pqi_general_admin_request *request,
3778 struct pqi_general_admin_response *response)
3782 pqi_submit_admin_request(ctrl_info, request);
3784 rc = pqi_poll_for_admin_response(ctrl_info, response);
3787 rc = pqi_validate_admin_response(response,
3788 request->function_code);
3793 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3796 struct pqi_general_admin_request request;
3797 struct pqi_general_admin_response response;
3798 struct pqi_device_capability *capability;
3799 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3801 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3805 memset(&request, 0, sizeof(request));
3807 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3808 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3809 &request.header.iu_length);
3810 request.function_code =
3811 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3812 put_unaligned_le32(sizeof(*capability),
3813 &request.data.report_device_capability.buffer_length);
3815 rc = pqi_map_single(ctrl_info->pci_dev,
3816 &request.data.report_device_capability.sg_descriptor,
3817 capability, sizeof(*capability),
3818 PCI_DMA_FROMDEVICE);
3822 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3825 pqi_pci_unmap(ctrl_info->pci_dev,
3826 &request.data.report_device_capability.sg_descriptor, 1,
3827 PCI_DMA_FROMDEVICE);
3832 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3837 ctrl_info->max_inbound_queues =
3838 get_unaligned_le16(&capability->max_inbound_queues);
3839 ctrl_info->max_elements_per_iq =
3840 get_unaligned_le16(&capability->max_elements_per_iq);
3841 ctrl_info->max_iq_element_length =
3842 get_unaligned_le16(&capability->max_iq_element_length)
3843 * 16;
3844 ctrl_info->max_outbound_queues =
3845 get_unaligned_le16(&capability->max_outbound_queues);
3846 ctrl_info->max_elements_per_oq =
3847 get_unaligned_le16(&capability->max_elements_per_oq);
3848 ctrl_info->max_oq_element_length =
3849 get_unaligned_le16(&capability->max_oq_element_length)
3850 * 16;
3852 sop_iu_layer_descriptor =
3853 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3855 ctrl_info->max_inbound_iu_length_per_firmware =
3856 get_unaligned_le16(
3857 &sop_iu_layer_descriptor->max_inbound_iu_length);
3858 ctrl_info->inbound_spanning_supported =
3859 sop_iu_layer_descriptor->inbound_spanning_supported;
3860 ctrl_info->outbound_spanning_supported =
3861 sop_iu_layer_descriptor->outbound_spanning_supported;
3869 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3871 if (ctrl_info->max_iq_element_length <
3872 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3873 dev_err(&ctrl_info->pci_dev->dev,
3874 "max. inbound queue element length of %d is less than the required length of %d\n",
3875 ctrl_info->max_iq_element_length,
3876 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3880 if (ctrl_info->max_oq_element_length <
3881 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3882 dev_err(&ctrl_info->pci_dev->dev,
3883 "max. outbound queue element length of %d is less than the required length of %d\n",
3884 ctrl_info->max_oq_element_length,
3885 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3889 if (ctrl_info->max_inbound_iu_length_per_firmware <
3890 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3891 dev_err(&ctrl_info->pci_dev->dev,
3892 "max. inbound IU length of %u is less than the min. required length of %d\n",
3893 ctrl_info->max_inbound_iu_length_per_firmware,
3894 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3898 if (!ctrl_info->inbound_spanning_supported) {
3899 dev_err(&ctrl_info->pci_dev->dev,
3900 "the controller does not support inbound spanning\n");
3904 if (ctrl_info->outbound_spanning_supported) {
3905 dev_err(&ctrl_info->pci_dev->dev,
3906 "the controller supports outbound spanning but this driver does not\n");
3913 static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3914 bool inbound_queue, u16 queue_id)
3916 struct pqi_general_admin_request request;
3917 struct pqi_general_admin_response response;
3919 memset(&request, 0, sizeof(request));
3920 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3921 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3922 &request.header.iu_length);
3923 if (inbound_queue)
3924 request.function_code =
3925 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3926 else
3927 request.function_code =
3928 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3929 put_unaligned_le16(queue_id,
3930 &request.data.delete_operational_queue.queue_id);
3932 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3936 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3939 struct pqi_event_queue *event_queue;
3940 struct pqi_general_admin_request request;
3941 struct pqi_general_admin_response response;
3943 event_queue = &ctrl_info->event_queue;
3946 * Create OQ (Outbound Queue - device to host queue) to dedicate
3949 memset(&request, 0, sizeof(request));
3950 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3951 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3952 &request.header.iu_length);
3953 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3954 put_unaligned_le16(event_queue->oq_id,
3955 &request.data.create_operational_oq.queue_id);
3956 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3957 &request.data.create_operational_oq.element_array_addr);
3958 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3959 &request.data.create_operational_oq.pi_addr);
3960 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3961 &request.data.create_operational_oq.num_elements);
3962 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3963 &request.data.create_operational_oq.element_length);
3964 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3965 put_unaligned_le16(event_queue->int_msg_num,
3966 &request.data.create_operational_oq.int_msg_num);
3968 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3973 event_queue->oq_ci = ctrl_info->iomem_base +
3974 PQI_DEVICE_REGISTERS_OFFSET +
3975 get_unaligned_le64(
3976 &response.data.create_operational_oq.oq_ci_offset);
3981 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3982 unsigned int group_number)
3985 struct pqi_queue_group *queue_group;
3986 struct pqi_general_admin_request request;
3987 struct pqi_general_admin_response response;
3989 queue_group = &ctrl_info->queue_groups[group_number];
3992 * Create IQ (Inbound Queue - host to device queue) for
3995 memset(&request, 0, sizeof(request));
3996 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3997 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3998 &request.header.iu_length);
3999 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4000 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4001 &request.data.create_operational_iq.queue_id);
4002 put_unaligned_le64(
4003 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4004 &request.data.create_operational_iq.element_array_addr);
4005 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4006 &request.data.create_operational_iq.ci_addr);
4007 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4008 &request.data.create_operational_iq.num_elements);
4009 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4010 &request.data.create_operational_iq.element_length);
4011 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4013 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4016 dev_err(&ctrl_info->pci_dev->dev,
4017 "error creating inbound RAID queue\n");
4021 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4022 PQI_DEVICE_REGISTERS_OFFSET +
4023 get_unaligned_le64(
4024 &response.data.create_operational_iq.iq_pi_offset);
4027 * Create IQ (Inbound Queue - host to device queue) for
4028 * Advanced I/O (AIO) path.
4030 memset(&request, 0, sizeof(request));
4031 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4032 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4033 &request.header.iu_length);
4034 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4035 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4036 &request.data.create_operational_iq.queue_id);
4037 put_unaligned_le64((u64)queue_group->
4038 iq_element_array_bus_addr[AIO_PATH],
4039 &request.data.create_operational_iq.element_array_addr);
4040 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4041 &request.data.create_operational_iq.ci_addr);
4042 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4043 &request.data.create_operational_iq.num_elements);
4044 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4045 &request.data.create_operational_iq.element_length);
4046 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4048 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4051 dev_err(&ctrl_info->pci_dev->dev,
4052 "error creating inbound AIO queue\n");
4053 goto delete_inbound_queue_raid;
4056 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4057 PQI_DEVICE_REGISTERS_OFFSET +
4058 get_unaligned_le64(
4059 &response.data.create_operational_iq.iq_pi_offset);
4062 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4063 * assumed to be for RAID path I/O unless we change the queue's
4066 memset(&request, 0, sizeof(request));
4067 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4068 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4069 &request.header.iu_length);
4070 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4071 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4072 &request.data.change_operational_iq_properties.queue_id);
4073 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4074 &request.data.change_operational_iq_properties.vendor_specific);
4076 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4079 dev_err(&ctrl_info->pci_dev->dev,
4080 "error changing queue property\n");
4081 goto delete_inbound_queue_aio;
4085 * Create OQ (Outbound Queue - device to host queue).
4087 memset(&request, 0, sizeof(request));
4088 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4089 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4090 &request.header.iu_length);
4091 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4092 put_unaligned_le16(queue_group->oq_id,
4093 &request.data.create_operational_oq.queue_id);
4094 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4095 &request.data.create_operational_oq.element_array_addr);
4096 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4097 &request.data.create_operational_oq.pi_addr);
4098 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4099 &request.data.create_operational_oq.num_elements);
4100 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4101 &request.data.create_operational_oq.element_length);
4102 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4103 put_unaligned_le16(queue_group->int_msg_num,
4104 &request.data.create_operational_oq.int_msg_num);
4106 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4109 dev_err(&ctrl_info->pci_dev->dev,
4110 "error creating outbound queue\n");
4111 goto delete_inbound_queue_aio;
4114 queue_group->oq_ci = ctrl_info->iomem_base +
4115 PQI_DEVICE_REGISTERS_OFFSET +
4116 get_unaligned_le64(
4117 &response.data.create_operational_oq.oq_ci_offset);
4121 delete_inbound_queue_aio:
4122 pqi_delete_operational_queue(ctrl_info, true,
4123 queue_group->iq_id[AIO_PATH]);
4125 delete_inbound_queue_raid:
4126 pqi_delete_operational_queue(ctrl_info, true,
4127 queue_group->iq_id[RAID_PATH]);
4132 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4137 rc = pqi_create_event_queue(ctrl_info);
4139 dev_err(&ctrl_info->pci_dev->dev,
4140 "error creating event queue\n");
4144 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4145 rc = pqi_create_queue_group(ctrl_info, i);
4147 dev_err(&ctrl_info->pci_dev->dev,
4148 "error creating queue group number %u/%u\n",
4149 i, ctrl_info->num_queue_groups);
4157 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4158 (offsetof(struct pqi_event_config, descriptors) + \
4159 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4161 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4166 struct pqi_event_config *event_config;
4167 struct pqi_event_descriptor *event_descriptor;
4168 struct pqi_general_management_request request;
4170 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4175 memset(&request, 0, sizeof(request));
4177 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4178 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4179 data.report_event_configuration.sg_descriptors[1]) -
4180 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4181 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4182 &request.data.report_event_configuration.buffer_length);
4184 rc = pqi_map_single(ctrl_info->pci_dev,
4185 request.data.report_event_configuration.sg_descriptors,
4186 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4187 PCI_DMA_FROMDEVICE);
4191 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4192 0, NULL, NO_TIMEOUT);
4194 pqi_pci_unmap(ctrl_info->pci_dev,
4195 request.data.report_event_configuration.sg_descriptors, 1,
4196 PCI_DMA_FROMDEVICE);
4201 for (i = 0; i < event_config->num_event_descriptors; i++) {
4202 event_descriptor = &event_config->descriptors[i];
4203 if (enable_events &&
4204 pqi_is_supported_event(event_descriptor->event_type))
4205 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4206 &event_descriptor->oq_id);
4208 put_unaligned_le16(0, &event_descriptor->oq_id);
4211 memset(&request, 0, sizeof(request));
4213 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4214 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4215 data.report_event_configuration.sg_descriptors[1]) -
4216 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4217 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4218 &request.data.report_event_configuration.buffer_length);
4220 rc = pqi_map_single(ctrl_info->pci_dev,
4221 request.data.report_event_configuration.sg_descriptors,
4222 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4227 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4230 pqi_pci_unmap(ctrl_info->pci_dev,
4231 request.data.report_event_configuration.sg_descriptors, 1,
4235 kfree(event_config);
4240 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4242 return pqi_configure_events(ctrl_info, true);
4245 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4247 return pqi_configure_events(ctrl_info, false);
4250 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4254 size_t sg_chain_buffer_length;
4255 struct pqi_io_request *io_request;
4257 if (!ctrl_info->io_request_pool)
4260 dev = &ctrl_info->pci_dev->dev;
4261 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4262 io_request = ctrl_info->io_request_pool;
4264 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4265 kfree(io_request->iu);
4266 if (!io_request->sg_chain_buffer)
4268 dma_free_coherent(dev, sg_chain_buffer_length,
4269 io_request->sg_chain_buffer,
4270 io_request->sg_chain_buffer_dma_handle);
4274 kfree(ctrl_info->io_request_pool);
4275 ctrl_info->io_request_pool = NULL;
4278 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4280 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4281 ctrl_info->error_buffer_length,
4282 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4284 if (!ctrl_info->error_buffer)
4290 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4293 void *sg_chain_buffer;
4294 size_t sg_chain_buffer_length;
4295 dma_addr_t sg_chain_buffer_dma_handle;
4297 struct pqi_io_request *io_request;
4299 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4300 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4302 if (!ctrl_info->io_request_pool) {
4303 dev_err(&ctrl_info->pci_dev->dev,
4304 "failed to allocate I/O request pool\n");
4308 dev = &ctrl_info->pci_dev->dev;
4309 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4310 io_request = ctrl_info->io_request_pool;
4312 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4313 io_request->iu =
4314 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4316 if (!io_request->iu) {
4317 dev_err(&ctrl_info->pci_dev->dev,
4318 "failed to allocate IU buffers\n");
4322 sg_chain_buffer = dma_alloc_coherent(dev,
4323 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4326 if (!sg_chain_buffer) {
4327 dev_err(&ctrl_info->pci_dev->dev,
4328 "failed to allocate PQI scatter-gather chain buffers\n");
4332 io_request->index = i;
4333 io_request->sg_chain_buffer = sg_chain_buffer;
4334 io_request->sg_chain_buffer_dma_handle =
4335 sg_chain_buffer_dma_handle;
4342 pqi_free_all_io_requests(ctrl_info);
4348 * Calculate required resources that are sized based on max. outstanding
4349 * requests and max. transfer size.
4352 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4354 u32 max_transfer_size;
4357 ctrl_info->scsi_ml_can_queue =
4358 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4359 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4361 ctrl_info->error_buffer_length =
4362 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4364 if (reset_devices)
4365 max_transfer_size = min(ctrl_info->max_transfer_size,
4366 PQI_MAX_TRANSFER_SIZE_KDUMP);
4367 else
4368 max_transfer_size = min(ctrl_info->max_transfer_size,
4369 PQI_MAX_TRANSFER_SIZE);
4371 max_sg_entries = max_transfer_size / PAGE_SIZE;
4373 /* +1 to cover when the buffer is not page-aligned. */
4374 max_sg_entries++;
4376 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4378 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4380 ctrl_info->sg_chain_buffer_length =
4381 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4382 PQI_EXTRA_SGL_MEMORY;
4383 ctrl_info->sg_tablesize = max_sg_entries;
4384 ctrl_info->max_sectors = max_transfer_size / 512;
4385 }
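/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE and a 1 MiB
 * max transfer): max_sg_entries starts at 1 MiB / 4 KiB = 256, plus one
 * for a non-page-aligned buffer = 257. If the controller allows fewer,
 * the transfer size shrinks to match: with 65 SG entries, for example,
 * max_transfer_size becomes (65 - 1) * 4 KiB = 256 KiB, i.e. 512 sectors.
 */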
4387 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4389 int num_queue_groups;
4390 u16 num_elements_per_iq;
4391 u16 num_elements_per_oq;
4393 if (reset_devices) {
4394 num_queue_groups = 1;
4397 int max_queue_groups;
4399 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4400 ctrl_info->max_outbound_queues - 1);
4401 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4403 num_cpus = num_online_cpus();
4404 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4405 num_queue_groups = min(num_queue_groups, max_queue_groups);
4408 ctrl_info->num_queue_groups = num_queue_groups;
4409 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
4415 ctrl_info->max_inbound_iu_length =
4416 (ctrl_info->max_inbound_iu_length_per_firmware /
4417 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4418 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4420 num_elements_per_iq =
4421 (ctrl_info->max_inbound_iu_length /
4422 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4424 /* Add one because one element in each queue is unusable. */
4425 num_elements_per_iq++;
4427 num_elements_per_iq = min(num_elements_per_iq,
4428 ctrl_info->max_elements_per_iq);
4430 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4431 num_elements_per_oq = min(num_elements_per_oq,
4432 ctrl_info->max_elements_per_oq);
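	/*
	 * Each queue group pairs two inbound queues (RAID and AIO) with a
	 * single outbound queue, which is presumably why the OQ is sized at
	 * roughly twice the IQ element count.
	 */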
4434 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4435 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4437 ctrl_info->max_sg_per_iu =
4438 ((ctrl_info->max_inbound_iu_length -
4439 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4440 sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
}
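/*
 * PQI SG descriptors are little-endian on the wire, so the fields are
 * written with put_unaligned_le*() to stay correct on big-endian hosts.
 */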
static inline void pqi_set_sg_descriptor(
	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
{
4447 u64 address = (u64)sg_dma_address(sg);
4448 unsigned int length = sg_dma_len(sg);
4450 put_unaligned_le64(address, &sg_descriptor->address);
4451 put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}
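/*
 * Build the SG list for a RAID-path request.  When the list does not fit
 * in the IU, the last in-IU slot becomes a CISS_SG_CHAIN descriptor that
 * points at the request's pre-allocated DMA-mapped chain buffer.
 */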
4455 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4456 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;

	if (sg_count == 0)
		goto out;
4478 sg = scsi_sglist(scmd);
4479 sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	num_sg_in_iu = 0;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
4496 &sg_descriptor->address);
4497 put_unaligned_le32((sg_count - num_sg_in_iu)
4498 * sizeof(*sg_descriptor),
4499 &sg_descriptor->length);
4500 put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}

		sg = sg_next(sg);
	}
4509 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4510 request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}
4519 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4520 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;
	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;
4543 sg = scsi_sglist(scmd);
4544 sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
4560 &sg_descriptor->address);
4561 put_unaligned_le32((sg_count - num_sg_in_iu)
4562 * sizeof(*sg_descriptor),
4563 &sg_descriptor->length);
4564 put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}

		sg = sg_next(sg);
	}
4573 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4574 request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}
static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
4587 struct scsi_cmnd *scmd;
4589 scmd = io_request->scmd;
4590 pqi_free_io_request(io_request);
4591 scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}
4595 static int pqi_raid_submit_scsi_cmd_with_io_request(
4596 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
4597 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_raid_path_request *request;
4604 io_request->io_complete_callback = pqi_raid_io_complete;
4605 io_request->scmd = scmd;
	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));
4611 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4612 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4613 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4614 put_unaligned_le16(io_request->index, &request->request_id);
4615 request->error_index = request->request_id;
4616 memcpy(request->lun_number, device->scsi3addr,
4617 sizeof(request->lun_number));
4619 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4620 memcpy(request->cdb, scmd->cmnd, cdb_length);
	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		/* No bytes in the Additional CDB bytes field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		/* 4 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		/* 8 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		/* 12 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		/* 16 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}
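	/*
	 * SOP data-direction flags are from the controller's perspective:
	 * a host write (DMA_TO_DEVICE) is a controller *read* from host
	 * memory, hence SOP_READ_FLAG below.
	 */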
	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}
	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}
static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct pqi_io_request *io_request;

	io_request = pqi_alloc_io_request(ctrl_info);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}
static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		schedule_work(&ctrl_info->raid_bypass_retry_work);
}
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	if (!io_request->raid_bypass)
		return false;

	scmd = io_request->scmd;
	if ((scmd->result & 0xff) == SAM_STAT_GOOD)
		return false;
	if (host_byte(scmd->result) == DID_NO_CONNECT)
		return false;

	device = scmd->device->hostdata;
	if (pqi_device_offline(device))
		return false;

	ctrl_info = shost_to_hba(scmd->device->host);
	if (pqi_ctrl_offline(ctrl_info))
		return false;

	return true;
}
4729 static inline void pqi_add_to_raid_bypass_retry_list(
4730 struct pqi_ctrl_info *ctrl_info,
4731 struct pqi_io_request *io_request, bool at_head)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	if (at_head)
		list_add(&io_request->request_list_entry,
			&ctrl_info->raid_bypass_retry_list);
	else
		list_add_tail(&io_request->request_list_entry,
			&ctrl_info->raid_bypass_retry_list);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
}
static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}
static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_ctrl_info *ctrl_info;

	io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
	scmd = io_request->scmd;
	scmd->result = 0;
	ctrl_info = shost_to_hba(scmd->device->host);

	pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
	pqi_schedule_bypass_retry(ctrl_info);
}
static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;
4774 struct pqi_queue_group *queue_group;
4776 scmd = io_request->scmd;
4777 device = scmd->device->hostdata;
4778 if (pqi_device_in_reset(device)) {
4779 pqi_free_io_request(io_request);
4780 set_host_byte(scmd, DID_RESET);
		pqi_scsi_done(scmd);
		return 0;
	}
4785 ctrl_info = shost_to_hba(scmd->device->host);
4786 queue_group = io_request->queue_group;
4788 pqi_reinit_io_request(io_request);
4790 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4791 device, scmd, queue_group);
static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_io_request *io_request;
4800 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4801 io_request = list_first_entry_or_null(
4802 &ctrl_info->raid_bypass_retry_list,
4803 struct pqi_io_request, request_list_entry);
	if (io_request)
		list_del(&io_request->request_list_entry);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);

	return io_request;
}
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_io_request *io_request;

	pqi_ctrl_busy(ctrl_info);

	while (1) {
		if (pqi_ctrl_blocked(ctrl_info))
			break;
		io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
		if (!io_request)
			break;
		rc = pqi_retry_raid_bypass(io_request);
		if (rc) {
			pqi_add_to_raid_bypass_retry_list(ctrl_info,
				io_request, true);
			pqi_schedule_bypass_retry(ctrl_info);
			break;
		}
	}

	pqi_ctrl_unbusy(ctrl_info);
}
static void pqi_raid_bypass_retry_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info,
4841 raid_bypass_retry_work);
	pqi_retry_raid_bypass_requests(ctrl_info);
}
static void pqi_clear_all_queued_raid_bypass_retries(
	struct pqi_ctrl_info *ctrl_info)
{
4848 unsigned long flags;
4850 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4851 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
}
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;
4860 scmd = io_request->scmd;
4861 scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN)
		set_host_byte(scmd, DID_IMM_RETRY);
	else if (pqi_raid_bypass_retry_needed(io_request)) {
		pqi_queue_raid_bypass_retry(io_request);
		return;
	}
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
}
4880 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4881 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4882 unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_path_request *request;
4889 io_request = pqi_alloc_io_request(ctrl_info);
4890 io_request->io_complete_callback = pqi_aio_io_complete;
4891 io_request->scmd = scmd;
4892 io_request->raid_bypass = raid_bypass;
	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));
4898 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4899 put_unaligned_le32(aio_handle, &request->nexus_id);
4900 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4901 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4902 put_unaligned_le16(io_request->index, &request->request_id);
4903 request->error_index = request->request_id;
4904 if (cdb_length > sizeof(request->cdb))
4905 cdb_length = sizeof(request->cdb);
4906 request->cdb_length = cdb_length;
4907 memcpy(request->cdb, cdb, cdb_length);
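	/* As on the RAID path, SOP direction flags are controller-relative. */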
	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}
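	/*
	 * For encrypted volumes, pass the data encryption key index and the
	 * two 32-bit halves of the per-request tweak so the controller can
	 * encrypt/decrypt this I/O on the AIO path.
	 */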
4929 if (encryption_info) {
4930 request->encryption_enable = true;
4931 put_unaligned_le16(encryption_info->data_encryption_key_index,
4932 &request->data_encryption_key_index);
4933 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4934 &request->encrypt_tweak_lower);
4935 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&request->encrypt_tweak_upper);
	}
	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}
static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
	u16 hw_queue;

	hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
	if (hw_queue > ctrl_info->max_hw_queue_index)
		hw_queue = 0;

	return hw_queue;
}
/*
 * This function gets called just before we hand the completed SCSI request
 * back to the SML.
 */
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
	struct pqi_scsi_dev *device;

	device = scmd->device->hostdata;
	atomic_dec(&device->scsi_cmds_outstanding);
}
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hw_queue;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;
4985 device = scmd->device->hostdata;
4986 ctrl_info = shost_to_hba(shost);
4988 atomic_inc(&device->scsi_cmds_outstanding);
4990 if (pqi_ctrl_offline(ctrl_info)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}
4996 pqi_ctrl_busy(ctrl_info);
4997 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	scmd->result = 0;
5008 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5009 queue_group = &ctrl_info->queue_groups[hw_queue];
	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		if (device->raid_bypass_enabled &&
				!blk_rq_is_passthrough(scmd->request)) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
				scmd, queue_group);
			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
				raid_bypassed = true;
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	} else {
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	}

out:
	pqi_ctrl_unbusy(ctrl_info);
	if (rc)
		atomic_dec(&device->scsi_cmds_outstanding);

	return rc;
}
5040 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5041 struct pqi_queue_group *queue_group)
5044 unsigned long flags;
5047 for (path = 0; path < 2; path++) {
5050 &queue_group->submit_lock[path], flags);
5052 list_empty(&queue_group->request_list[path]);
5053 spin_unlock_irqrestore(
5054 &queue_group->submit_lock[path], flags);
5057 pqi_check_ctrl_health(ctrl_info);
5058 if (pqi_ctrl_offline(ctrl_info))
5060 usleep_range(1000, 2000);
5067 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5072 struct pqi_queue_group *queue_group;
5076 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5077 queue_group = &ctrl_info->queue_groups[i];
5079 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5083 for (path = 0; path < 2; path++) {
5084 iq_pi = queue_group->iq_pi_copy[path];
5087 iq_ci = *queue_group->iq_ci[path];
5090 pqi_check_ctrl_health(ctrl_info);
5091 if (pqi_ctrl_offline(ctrl_info))
5093 usleep_range(1000, 2000);
5101 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5102 struct pqi_scsi_dev *device)
5106 struct pqi_queue_group *queue_group;
5107 unsigned long flags;
5108 struct pqi_io_request *io_request;
5109 struct pqi_io_request *next;
5110 struct scsi_cmnd *scmd;
5111 struct pqi_scsi_dev *scsi_device;
5113 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5114 queue_group = &ctrl_info->queue_groups[i];
5116 for (path = 0; path < 2; path++) {
5118 &queue_group->submit_lock[path], flags);
5120 list_for_each_entry_safe(io_request, next,
5121 &queue_group->request_list[path],
5122 request_list_entry) {
5123 scmd = io_request->scmd;
5127 scsi_device = scmd->device->hostdata;
5128 if (scsi_device != device)
5131 list_del(&io_request->request_list_entry);
5132 set_host_byte(scmd, DID_RESET);
5133 pqi_scsi_done(scmd);
5136 spin_unlock_irqrestore(
5137 &queue_group->submit_lock[path], flags);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	while (atomic_read(&device->scsi_cmds_outstanding)) {
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		usleep_range(1000, 2000);
	}

	return 0;
}
5155 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5158 unsigned long flags;
5159 struct pqi_scsi_dev *device;
5164 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5165 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5166 scsi_device_list_entry) {
5167 if (atomic_read(&device->scsi_cmds_outstanding)) {
5172 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5178 pqi_check_ctrl_health(ctrl_info);
5179 if (pqi_ctrl_offline(ctrl_info))
5182 usleep_range(1000, 2000);
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}
5196 #define PQI_LUN_RESET_TIMEOUT_SECS 10
5198 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5199 struct pqi_scsi_dev *device, struct completion *wait)
5204 if (wait_for_completion_io_timeout(wait,
5205 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5210 pqi_check_ctrl_health(ctrl_info);
5211 if (pqi_ctrl_offline(ctrl_info)) {
5220 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
5225 DECLARE_COMPLETION_ONSTACK(wait);
5226 struct pqi_task_management_request *request;
5228 io_request = pqi_alloc_io_request(ctrl_info);
5229 io_request->io_complete_callback = pqi_lun_reset_complete;
5230 io_request->context = &wait;
5232 request = io_request->iu;
5233 memset(request, 0, sizeof(*request));
5235 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5236 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5237 &request->header.iu_length);
5238 put_unaligned_le16(io_request->index, &request->request_id);
5239 memcpy(request->lun_number, device->scsi3addr,
5240 sizeof(request->lun_number));
5241 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
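	/*
	 * The LUN reset is issued as a SOP task-management IU on the RAID
	 * path of the default queue group; pqi_lun_reset_complete() above
	 * fires when the controller responds.
	 */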
5243 pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);
	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);

	return rc;
}
/* Performs a reset at the LUN level. */

static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	rc = pqi_lun_reset(ctrl_info, device);
	if (rc == 0)
		rc = pqi_device_wait_for_pending_io(ctrl_info, device);

	return rc == 0 ? SUCCESS : FAILED;
}
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	int rc;
	struct Scsi_Host *shost;
5274 struct pqi_ctrl_info *ctrl_info;
5275 struct pqi_scsi_dev *device;
5277 shost = scmd->device->host;
5278 ctrl_info = shost_to_hba(shost);
5279 device = scmd->device->hostdata;
5281 dev_err(&ctrl_info->pci_dev->dev,
5282 "resetting scsi %d:%d:%d:%d\n",
5283 shost->host_no, device->bus, device->target, device->lun);
5285 pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info)) {
		rc = FAILED;
		goto out;
	}
5291 mutex_lock(&ctrl_info->lun_reset_mutex);
5293 pqi_ctrl_block_requests(ctrl_info);
5294 pqi_ctrl_wait_until_quiesced(ctrl_info);
5295 pqi_fail_io_queued_for_device(ctrl_info, device);
5296 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5297 pqi_device_reset_start(device);
5298 pqi_ctrl_unblock_requests(ctrl_info);
	if (rc)
		rc = FAILED;
	else
		rc = pqi_device_reset(ctrl_info, device);
5305 pqi_device_reset_done(device);
	mutex_unlock(&ctrl_info->lun_reset_mutex);

out:
	dev_err(&ctrl_info->pci_dev->dev,
5311 "reset of scsi %d:%d:%d:%d: %s\n",
5312 shost->host_no, device->bus, device->target, device->lun,
		rc == SUCCESS ? "SUCCESS" : "FAILED");

	return rc;
}
static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
5321 unsigned long flags;
5322 struct pqi_ctrl_info *ctrl_info;
5323 struct scsi_target *starget;
5324 struct sas_rphy *rphy;
5326 ctrl_info = shost_to_hba(sdev->host);
5328 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5330 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5331 starget = scsi_target(sdev);
5332 rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			device->target = sdev_id(sdev);
5336 device->lun = sdev->lun;
			device->target_lun_valid = true;
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device) {
		sdev->hostdata = device;
5346 device->sdev = sdev;
5347 if (device->queue_depth) {
5348 device->advertised_queue_depth = device->queue_depth;
5349 scsi_change_queue_depth(sdev,
5350 device->advertised_queue_depth);
		}
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}
static int pqi_map_queues(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
	void __user *arg)
{
5369 struct pci_dev *pci_dev;
5370 u32 subsystem_vendor;
5371 u32 subsystem_device;
	cciss_pci_info_struct pciinfo;

	if (!arg)
		return -EINVAL;
5377 pci_dev = ctrl_info->pci_dev;
5379 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5380 pciinfo.bus = pci_dev->bus->number;
5381 pciinfo.dev_fn = pci_dev->devfn;
5382 subsystem_vendor = pci_dev->subsystem_vendor;
5383 subsystem_device = pci_dev->subsystem_device;
	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
		subsystem_vendor;

	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;

	return 0;
}
static int pqi_getdrivver_ioctl(void __user *arg)
{
	u32 version;

	if (!arg)
		return -EINVAL;
5400 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5401 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
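	/*
	 * Example: driver version 1.1.2-125 encodes as
	 * (1 << 28) | (1 << 24) | (2 << 16) | 125 = 0x1102007d.
	 */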
	if (copy_to_user(arg, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}
struct ciss_error_info {
	u8	scsi_status;
	int	command_status;
	size_t	sense_data_length;
};
5415 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{
	int ciss_cmd_status;
	size_t sense_data_length;
5419 size_t sense_data_length;
	switch (pqi_error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
		break;
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
		break;
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
		break;
	default:
		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
		break;
	}
	sense_data_length =
		get_unaligned_le16(&pqi_error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(
				&pqi_error_info->response_data_length);
	if (sense_data_length)
		if (sense_data_length > sizeof(pqi_error_info->data))
			sense_data_length = sizeof(pqi_error_info->data);
5473 ciss_error_info->scsi_status = pqi_error_info->status;
5474 ciss_error_info->command_status = ciss_cmd_status;
	ciss_error_info->sense_data_length = sense_data_length;
}
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
5484 IOCTL_Command_struct iocommand;
5485 struct pqi_raid_path_request request;
5486 struct pqi_raid_error_info pqi_error_info;
5487 struct ciss_error_info ciss_error_info;
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;
	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
	case XFER_READ | XFER_WRITE:
		break;
	default:
		return -EINVAL;
	}
5515 if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
5519 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5520 if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}
5530 memset(&request, 0, sizeof(request));
5532 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5533 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5534 PQI_REQUEST_HEADER_LENGTH;
5535 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5536 sizeof(request.lun_number));
5537 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5538 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	case XFER_READ | XFER_WRITE:
		request.data_direction = SOP_BIDIRECTIONAL;
		break;
	}
5555 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5557 if (iocommand.buf_size > 0) {
5558 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}
5569 put_unaligned_le16(iu_length, &request.header.iu_length);
5571 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5572 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5574 if (iocommand.buf_size > 0)
5575 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5576 PCI_DMA_BIDIRECTIONAL);
5578 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
	if (rc == 0) {
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5582 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5583 iocommand.error_info.CommandStatus =
5584 ciss_error_info.command_status;
5585 sense_data_length = ciss_error_info.sense_data_length;
5586 if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}
	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
5602 if (rc == 0 && iocommand.buf_size > 0 &&
5603 (iocommand.Request.Type.Direction & XFER_READ)) {
5604 if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		rc = pqi_scan_scsi_devices(ctrl_info);
		break;
	case CCISS_GETPCIINFO:
		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
		break;
	case CCISS_GETDRIVVER:
		rc = pqi_getdrivver_ioctl(arg);
		break;
	case CCISS_PASSTHRU:
		rc = pqi_passthru_ioctl(ctrl_info, arg);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
static ssize_t pqi_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	ssize_t count = 0;
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;
5653 shost = class_to_shost(dev);
5654 ctrl_info = shost_to_hba(shost);
5656 count += snprintf(buffer + count, PAGE_SIZE - count,
5657 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5659 count += snprintf(buffer + count, PAGE_SIZE - count,
5660 "firmware: %s\n", ctrl_info->firmware_version);
5665 static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	pqi_scan_start(shost);

	return count;
}
5675 static ssize_t pqi_lockup_action_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	int count = 0;
	unsigned int i;
5681 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5682 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5683 count += snprintf(buffer + count, PAGE_SIZE - count,
5684 "[%s] ", pqi_lockup_actions[i].name);
5686 count += snprintf(buffer + count, PAGE_SIZE - count,
5687 "%s ", pqi_lockup_actions[i].name);
5690 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
5695 static ssize_t pqi_lockup_action_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	unsigned int i;
	char *action_name;
	char action_name_buffer[32];
5702 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
5703 action_name = strstrip(action_name_buffer);
5705 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5706 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return count;
		}
	}

	return -EINVAL;
}
5715 static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5716 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
5717 static DEVICE_ATTR(lockup_action, 0644,
5718 pqi_lockup_action_show, pqi_lockup_action_store);
static struct device_attribute *pqi_shost_attrs[] = {
	&dev_attr_version,
	&dev_attr_rescan,
	&dev_attr_lockup_action,
	NULL
};
5727 static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
5730 struct pqi_ctrl_info *ctrl_info;
5731 struct scsi_device *sdev;
5732 struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;
5736 sdev = to_scsi_device(dev);
5737 ctrl_info = shost_to_hba(sdev->host);
5739 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5741 device = sdev->hostdata;
5742 if (pqi_is_logical_device(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	sas_address = device->sas_address;
5749 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5751 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5754 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
5757 struct pqi_ctrl_info *ctrl_info;
5758 struct scsi_device *sdev;
5759 struct pqi_scsi_dev *device;
5760 unsigned long flags;
5762 sdev = to_scsi_device(dev);
5763 ctrl_info = shost_to_hba(sdev->host);
5765 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5767 device = sdev->hostdata;
	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}
5777 static ssize_t pqi_raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
5780 struct pqi_ctrl_info *ctrl_info;
5781 struct scsi_device *sdev;
5782 struct pqi_scsi_dev *device;
	unsigned long flags;
	char *raid_level;
5786 sdev = to_scsi_device(dev);
5787 ctrl_info = shost_to_hba(sdev->host);
5789 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5791 device = sdev->hostdata;
5793 if (pqi_is_logical_device(device))
		raid_level = pqi_raid_level_to_string(device->raid_level);
	else
		raid_level = "N/A";

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5800 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
5803 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5804 static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
5805 pqi_ssd_smart_path_enabled_show, NULL);
5806 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
5808 static struct device_attribute *pqi_sdev_attrs[] = {
5809 &dev_attr_sas_address,
5810 &dev_attr_ssd_smart_path_enabled,
	&dev_attr_raid_level,
	NULL
};
5815 static struct scsi_host_template pqi_driver_template = {
5816 .module = THIS_MODULE,
5817 .name = DRIVER_NAME_SHORT,
5818 .proc_name = DRIVER_NAME_SHORT,
5819 .queuecommand = pqi_scsi_queue_command,
5820 .scan_start = pqi_scan_start,
5821 .scan_finished = pqi_scan_finished,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
5824 .eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
5827 .map_queues = pqi_map_queues,
5828 .sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;
5837 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev,
5840 "scsi_host_alloc failed for controller %u\n",
			ctrl_info->ctrl_id);
		return -ENOMEM;
	}

	shost->io_port = 0;
5846 shost->n_io_port = 0;
5847 shost->this_id = -1;
5848 shost->max_channel = PQI_MAX_BUS;
5849 shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
5852 shost->max_sectors = ctrl_info->max_sectors;
5853 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5854 shost->cmd_per_lun = shost->can_queue;
5855 shost->sg_tablesize = ctrl_info->sg_tablesize;
5856 shost->transportt = pqi_sas_transport_template;
5857 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
5858 shost->unique_id = shost->irq;
5859 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5860 shost->hostdata[0] = (unsigned long)ctrl_info;
5862 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
5865 "scsi_add_host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto free_host;
	}
5870 rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
5873 "add SAS host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto remove_host;
	}
	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);
free_host:
	scsi_host_put(shost);

	return rc;
}
static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	pqi_delete_sas_host(ctrl_info);

	shost = ctrl_info->scsi_host;
	if (!shost)
		return;

	scsi_remove_host(shost);
	scsi_host_put(shost);
}
static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;
	struct pqi_device_registers __iomem *pqi_registers;
5908 unsigned long timeout;
5909 unsigned int timeout_msecs;
5910 union pqi_reset_register reset_reg;
5912 pqi_registers = ctrl_info->pqi_registers;
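	/* The max_reset_timeout register counts in 100 ms units. */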
5913 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;

	while (1) {
		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
5918 reset_reg.all_bits = readl(&pqi_registers->device_reset);
5919 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
5921 pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
		if (time_after(jiffies, timeout)) {
			rc = -ETIMEDOUT;
			break;
		}
	}

	return rc;
}
static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	union pqi_reset_register reset_reg;
5940 if (ctrl_info->pqi_reset_quiesce_supported) {
5941 rc = sis_pqi_reset_quiesce(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"PQI reset failed during quiesce with error %d\n",
				rc);
			return rc;
		}
	}
5950 reset_reg.all_bits = 0;
5951 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
5952 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
5954 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
5956 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"PQI reset failed with error %d\n", rc);

	return rc;
}
static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_identify_controller *identify;

	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
	if (!identify)
		return -ENOMEM;
	rc = pqi_identify_controller(ctrl_info, identify);
	if (rc)
		goto out;
5977 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5978 sizeof(identify->firmware_version));
5979 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5980 snprintf(ctrl_info->firmware_version +
5981 strlen(ctrl_info->firmware_version),
5982 sizeof(ctrl_info->firmware_version),
5983 "-%u", get_unaligned_le16(&identify->firmware_build_number));
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
	u32 table_length;
	u32 section_offset;
	void __iomem *table_iomem_addr;
	struct pqi_config_table *config_table;
	struct pqi_config_table_section_header *section;
5999 table_length = ctrl_info->config_table_length;
6001 config_table = kmalloc(table_length, GFP_KERNEL);
	if (!config_table) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate memory for PQI configuration table\n");
		return -ENOMEM;
	}
	/*
	 * Copy the config table contents from I/O memory space into the
	 * driver's buffer.
	 */
6012 table_iomem_addr = ctrl_info->iomem_base +
6013 ctrl_info->config_table_offset;
6014 memcpy_fromio(config_table, table_iomem_addr, table_length);
	section_offset =
		get_unaligned_le32(&config_table->first_section_offset);
6019 while (section_offset) {
6020 section = (void *)config_table + section_offset;
		switch (get_unaligned_le16(&section->section_id)) {
6023 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
			if (pqi_disable_heartbeat)
				dev_warn(&ctrl_info->pci_dev->dev,
				"heartbeat disabled by module parameter\n");
			else
				ctrl_info->heartbeat_counter =
					table_iomem_addr +
					section_offset +
					offsetof(
					struct pqi_config_table_heartbeat,
						heartbeat_counter);
			break;
		}

		section_offset =
			get_unaligned_le16(&section->next_section_offset);
	}
	kfree(config_table);

	return 0;
}
6046 /* Switches the controller from PQI mode back into SIS mode. */
static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
	rc = pqi_reset(ctrl_info);
	if (rc)
		return rc;
6056 rc = sis_reenable_sis_mode(ctrl_info);
6058 dev_err(&ctrl_info->pci_dev->dev,
6059 "re-enabling SIS mode failed with error %d\n", rc);
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);

	return 0;
}
/*
 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */
static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
		return 0;
	if (sis_is_kernel_up(ctrl_info)) {
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
		return 0;
	}
	return pqi_revert_to_sis_mode(ctrl_info);
}
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	rc = pqi_force_sis_mode(ctrl_info);
	if (rc)
		return rc;
	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc)
		return rc;
	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
6108 rc = sis_get_ctrl_properties(ctrl_info);
6110 dev_err(&ctrl_info->pci_dev->dev,
6111 "error obtaining controller properties\n");
6115 rc = sis_get_pqi_capabilities(ctrl_info);
6117 dev_err(&ctrl_info->pci_dev->dev,
6118 "error obtaining controller capabilities\n");
6122 if (reset_devices) {
6123 if (ctrl_info->max_outstanding_requests >
6124 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6125 ctrl_info->max_outstanding_requests =
				PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
	} else {
		if (ctrl_info->max_outstanding_requests >
6129 PQI_MAX_OUTSTANDING_REQUESTS)
6130 ctrl_info->max_outstanding_requests =
				PQI_MAX_OUTSTANDING_REQUESTS;
	}
6134 pqi_calculate_io_resources(ctrl_info);
6136 rc = pqi_alloc_error_buffer(ctrl_info);
6138 dev_err(&ctrl_info->pci_dev->dev,
6139 "failed to allocate PQI error buffer\n");
6144 * If the function we are about to call succeeds, the
6145 * controller will transition from legacy SIS mode
6148 rc = sis_init_base_struct_addr(ctrl_info);
6150 dev_err(&ctrl_info->pci_dev->dev,
6151 "error initializing PQI mode\n");
6155 /* Wait for the controller to complete the SIS -> PQI transition. */
6156 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6158 dev_err(&ctrl_info->pci_dev->dev,
6159 "transition to PQI mode failed\n");
6163 /* From here on, we are running in PQI mode. */
6164 ctrl_info->pqi_mode_enabled = true;
6165 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;
6171 rc = pqi_alloc_admin_queues(ctrl_info);
6173 dev_err(&ctrl_info->pci_dev->dev,
6174 "failed to allocate admin queues\n");
6178 rc = pqi_create_admin_queues(ctrl_info);
6180 dev_err(&ctrl_info->pci_dev->dev,
6181 "error creating admin queues\n");
6185 rc = pqi_report_device_capability(ctrl_info);
6187 dev_err(&ctrl_info->pci_dev->dev,
6188 "obtaining device capability failed\n");
	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;
6196 pqi_calculate_queue_resources(ctrl_info);
	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;
6202 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
6203 ctrl_info->max_msix_vectors =
6204 ctrl_info->num_msix_vectors_enabled;
6205 pqi_calculate_queue_resources(ctrl_info);
	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;
6212 rc = pqi_alloc_operational_queues(ctrl_info);
6214 dev_err(&ctrl_info->pci_dev->dev,
6215 "failed to allocate operational queues\n");
6219 pqi_init_operational_queues(ctrl_info);
	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;
	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;
6229 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6231 ctrl_info->controller_online = true;
6232 pqi_start_heartbeat_timer(ctrl_info);
6234 rc = pqi_enable_events(ctrl_info);
6236 dev_err(&ctrl_info->pci_dev->dev,
6237 "error enabling events\n");
6241 /* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;
6246 rc = pqi_get_ctrl_firmware_version(ctrl_info);
6248 dev_err(&ctrl_info->pci_dev->dev,
6249 "error obtaining firmware version\n");
6253 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6255 dev_err(&ctrl_info->pci_dev->dev,
6256 "error updating host wellness\n");
6260 pqi_schedule_update_time_worker(ctrl_info);
	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
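/*
 * Reset every queue producer/consumer index (and the driver's cached
 * copies) so that, after a suspend/resume or controller restart, driver
 * and firmware agree that all queues start empty at position zero.
 */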
static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_admin_queues *admin_queues;
	struct pqi_event_queue *event_queue;
6273 admin_queues = &ctrl_info->admin_queues;
6274 admin_queues->iq_pi_copy = 0;
6275 admin_queues->oq_ci_copy = 0;
6276 *admin_queues->oq_pi = 0;
6278 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6279 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
6280 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
6281 ctrl_info->queue_groups[i].oq_ci_copy = 0;
6283 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
6284 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
6285 *ctrl_info->queue_groups[i].oq_pi = 0;
6288 event_queue = &ctrl_info->event_queue;
6289 *event_queue->oq_pi = 0;
	event_queue->oq_ci_copy = 0;
}
static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	rc = pqi_force_sis_mode(ctrl_info);
	if (rc)
		return rc;
	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
	if (rc)
		return rc;
	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
6314 rc = sis_init_base_struct_addr(ctrl_info);
6316 dev_err(&ctrl_info->pci_dev->dev,
6317 "error initializing PQI mode\n");
6321 /* Wait for the controller to complete the SIS -> PQI transition. */
6322 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6324 dev_err(&ctrl_info->pci_dev->dev,
6325 "transition to PQI mode failed\n");
6329 /* From here on, we are running in PQI mode. */
6330 ctrl_info->pqi_mode_enabled = true;
6331 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6333 pqi_reinit_queues(ctrl_info);
6335 rc = pqi_create_admin_queues(ctrl_info);
6337 dev_err(&ctrl_info->pci_dev->dev,
6338 "error creating admin queues\n");
	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;
6346 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6348 ctrl_info->controller_online = true;
6349 pqi_start_heartbeat_timer(ctrl_info);
6350 pqi_ctrl_unblock_requests(ctrl_info);
6352 rc = pqi_enable_events(ctrl_info);
6354 dev_err(&ctrl_info->pci_dev->dev,
6355 "error enabling events\n");
6359 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6361 dev_err(&ctrl_info->pci_dev->dev,
6362 "error updating host wellness\n");
6366 pqi_schedule_update_time_worker(ctrl_info);
	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
6373 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6376 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6377 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
6387 dev_err(&ctrl_info->pci_dev->dev,
6388 "failed to enable PCI device\n");
6392 if (sizeof(dma_addr_t) > 4)
6393 mask = DMA_BIT_MASK(64);
6395 mask = DMA_BIT_MASK(32);
6397 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
6399 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6400 goto disable_device;
6403 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}
6410 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6411 ctrl_info->pci_dev, 0),
6412 sizeof(struct pqi_ctrl_registers));
6413 if (!ctrl_info->iomem_base) {
6414 dev_err(&ctrl_info->pci_dev->dev,
6415 "failed to map memory for controller registers\n");
6417 goto release_regions;
6420 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6422 /* Increase the PCIe completion timeout. */
6423 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6424 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
6427 "failed to set PCIe completion timeout\n");
		goto release_regions;
	}
6431 /* Enable bus mastering. */
6432 pci_set_master(ctrl_info->pci_dev);
6434 ctrl_info->registers = ctrl_info->iomem_base;
6435 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
6449 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6451 iounmap(ctrl_info->iomem_base);
6452 pci_release_regions(ctrl_info->pci_dev);
6453 if (pci_is_enabled(ctrl_info->pci_dev))
6454 pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
6458 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6460 struct pqi_ctrl_info *ctrl_info;
6462 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
		GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;
6467 mutex_init(&ctrl_info->scan_mutex);
6468 mutex_init(&ctrl_info->lun_reset_mutex);
6470 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6471 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6473 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6474 atomic_set(&ctrl_info->num_interrupts, 0);
6476 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6477 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6479 init_timer(&ctrl_info->heartbeat_timer);
6480 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
6482 sema_init(&ctrl_info->sync_request_sem,
6483 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
6484 init_waitqueue_head(&ctrl_info->block_requests_wait);
6486 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
6487 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
6488 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
6489 pqi_raid_bypass_retry_worker);
6491 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
6492 ctrl_info->irq_mode = IRQ_MODE_NONE;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	return ctrl_info;
}
static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_irqs(ctrl_info);
	pqi_disable_msix_interrupts(ctrl_info);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
6512 pqi_free_interrupts(ctrl_info);
6513 if (ctrl_info->queue_memory_base)
6514 dma_free_coherent(&ctrl_info->pci_dev->dev,
6515 ctrl_info->queue_memory_length,
6516 ctrl_info->queue_memory_base,
6517 ctrl_info->queue_memory_base_dma_handle);
6518 if (ctrl_info->admin_queue_memory_base)
6519 dma_free_coherent(&ctrl_info->pci_dev->dev,
6520 ctrl_info->admin_queue_memory_length,
6521 ctrl_info->admin_queue_memory_base,
6522 ctrl_info->admin_queue_memory_base_dma_handle);
6523 pqi_free_all_io_requests(ctrl_info);
6524 if (ctrl_info->error_buffer)
6525 dma_free_coherent(&ctrl_info->pci_dev->dev,
6526 ctrl_info->error_buffer_length,
6527 ctrl_info->error_buffer,
6528 ctrl_info->error_buffer_dma_handle);
6529 if (ctrl_info->iomem_base)
6530 pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_rescan_worker(ctrl_info);
6537 pqi_cancel_update_time_worker(ctrl_info);
6538 pqi_remove_all_scsi_devices(ctrl_info);
6539 pqi_unregister_scsi(ctrl_info);
6540 if (ctrl_info->pqi_mode_enabled)
6541 pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}
static void pqi_perform_lockup_action(void)
{
	switch (pqi_lockup_action) {
	case PANIC:
		panic("FATAL: Smart Family Controller lockup detected");
		break;
	case REBOOT:
		emergency_restart();
		break;
	case NONE:
	default:
		break;
	}
}
6560 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
6561 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}
static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
6593 pqi_stop_heartbeat_timer(ctrl_info);
6594 pqi_free_interrupts(ctrl_info);
6595 pqi_cancel_rescan_worker(ctrl_info);
6596 pqi_cancel_update_time_worker(ctrl_info);
6597 pqi_ctrl_wait_until_quiesced(ctrl_info);
6598 pqi_fail_all_outstanding_requests(ctrl_info);
6599 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}
static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->controller_online)
		return;
6616 ctrl_info->controller_online = false;
6617 ctrl_info->pqi_mode_enabled = false;
6618 pqi_ctrl_block_requests(ctrl_info);
6619 if (!pqi_disable_ctrl_shutdown)
6620 sis_shutdown_ctrl(ctrl_info);
6621 pci_disable_device(ctrl_info->pci_dev);
6622 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}
6626 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;
6631 if (id->driver_data)
6632 ctrl_description = (char *)id->driver_data;
6634 ctrl_description = "Microsemi Smart Family Controller";
6636 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
6639 static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;
6646 pqi_print_ctrl_info(pci_dev, id);
6648 if (pqi_disable_device_id_wildcards &&
6649 id->subvendor == PCI_ANY_ID &&
6650 id->subdevice == PCI_ANY_ID) {
6651 dev_warn(&pci_dev->dev,
6652 "controller not probed because device ID wildcards are disabled\n");
6656 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
6657 dev_warn(&pci_dev->dev,
6658 "controller device ID matched using wildcards\n");
6660 node = dev_to_node(&pci_dev->dev);
6661 if (node == NUMA_NO_NODE)
6662 set_dev_node(&pci_dev->dev, 0);
6664 ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}
6671 ctrl_info->pci_dev = pci_dev;
	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}
static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		goto error;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
	pqi_free_interrupts(ctrl_info);
	pqi_reset(ctrl_info);
	if (rc == 0)
		return;

error:
	dev_warn(&pci_dev->dev,
		"unable to flush controller cache\n");
}
6724 static void pqi_process_lockup_action_param(void)
	if (!pqi_lockup_action_param)
		return;
6731 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6732 if (strcmp(pqi_lockup_action_param,
6733 pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}
6739 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}
6748 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
6754 pqi_disable_events(ctrl_info);
6755 pqi_cancel_update_time_worker(ctrl_info);
6756 pqi_cancel_rescan_worker(ctrl_info);
6757 pqi_wait_until_scan_finished(ctrl_info);
6758 pqi_wait_until_lun_reset_finished(ctrl_info);
6759 pqi_flush_cache(ctrl_info, SUSPEND);
6760 pqi_ctrl_block_requests(ctrl_info);
6761 pqi_ctrl_wait_until_quiesced(ctrl_info);
6762 pqi_wait_until_inbound_queues_empty(ctrl_info);
6763 pqi_ctrl_wait_for_pending_io(ctrl_info);
6764 pqi_stop_heartbeat_timer(ctrl_info);
	if (state.event == PM_EVENT_FREEZE)
		return 0;
6769 pci_save_state(pci_dev);
6770 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6772 ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}
6778 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
6785 if (pci_dev->current_state != PCI_D0) {
6786 ctrl_info->max_hw_queue_index = 0;
6787 pqi_free_interrupts(ctrl_info);
6788 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6789 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6790 IRQF_SHARED, DRIVER_NAME_SHORT,
6791 &ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}

		pqi_start_heartbeat_timer(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		return 0;
	}
6803 pci_set_power_state(pci_dev, PCI_D0);
6804 pci_restore_state(pci_dev);
	return pqi_ctrl_init_resume(ctrl_info);
}
6809 /* Define the PCI IDs for the controllers that we support. */
6810 static const struct pci_device_id pqi_pci_id_table[] = {
6812 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6816 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6820 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6824 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6828 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0110)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0608)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0801)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0802)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0803)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0804)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0805)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0807)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0901)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0902)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0903)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0904)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0905)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0906)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0907)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0908)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x090a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1200)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1201)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1202)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1280)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1281)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1300)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1301)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1380)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_DELL, 0x1fe0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0600)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0601)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0602)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0603)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0609)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0650)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0651)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0652)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0653)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0654)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0655)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0700)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0701)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1100)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
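/*
 * For reference (not driver code): PCI_DEVICE_SUB() from <linux/pci.h>
 * fills in the four match fields of struct pci_device_id, so an entry
 * such as
 *
 *	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 *		       PCI_VENDOR_ID_HP, 0x0600)
 *
 * is equivalent to
 *
 *	.vendor = PCI_VENDOR_ID_ADAPTEC2, .device = 0x028f,
 *	.subvendor = PCI_VENDOR_ID_HP, .subdevice = 0x0600,
 *
 * The trailing PCI_ANY_ID entry is a catch-all matching any 0x028f
 * function regardless of subsystem IDs, and { 0 } terminates the table.
 */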
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template =
		sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}
static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
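/*
 * Illustrative lifecycle (not driver code): "modprobe smartpqi" runs
 * pqi_init(), which registers the SAS transport and the PCI driver,
 * causing pqi_pci_probe() to run for every controller that matches
 * pqi_pci_id_table[]; "rmmod smartpqi" runs pqi_cleanup(), unwinding
 * both registrations in reverse order.
 */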
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		work_area) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
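	/*
	 * Aside (illustrative): BUILD_BUG_ON() is a compile-time assertion -
	 * the build fails if its condition is true, so none of the checks in
	 * this function cost anything at run time. A minimal sketch of the
	 * idiom, using a hypothetical structure:
	 *
	 *	struct example {
	 *		u8	type;
	 *		u8	reserved;
	 *		__le16	length;
	 *	};
	 *	BUILD_BUG_ON(offsetof(struct example, length) != 2);
	 *	BUILD_BUG_ON(sizeof(struct example) != 4);
	 *
	 * Any compiler-inserted padding that moved "length" would break the
	 * on-the-wire layout, and is caught at build time instead of in the
	 * field.
	 */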
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
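/*
 * Note: verify_structures() is never called at run time; it exists only
 * so the compiler evaluates the BUILD_BUG_ON() checks above, which is
 * why it is marked __attribute__((unused)).
 */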