// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.16-010"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		16
#define DRIVER_REVISION		10

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{ .action = NONE,	.name = "none" },
	{ .action = REBOOT,	.name = "reboot" },
	{ .action = PANIC,	.name = "panic" },
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first,
	"Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep,
	"Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

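/*
 * For logical volumes the controller encodes the LUN as a 32-bit
 * little-endian value in scsi3addr[0..3]; byte 2 carries the target
 * bits that pqi_assign_bus_target_lun() below extracts with
 * (lunid >> 16) & 0x3fff, so a non-zero byte 2 identifies an
 * external RAID address.
 */
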
static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_device_reset = true;
}

static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_device_reset;
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

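/*
 * Quiesce logic: a thread entering a submission path bumps
 * num_busy_threads, and a thread parked in pqi_wait_if_ctrl_blocked()
 * bumps num_blocked_threads; the controller is presumed quiesced once
 * every busy thread is also blocked, i.e. busy <= blocked.
 */
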
static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}

static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_shutdown = true;
}

static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_work_sync(&ctrl_info->event_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

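/*
 * pqi_map_single() always emits exactly one SG descriptor and flags it
 * with CISS_SG_LAST, so callers tear the mapping down by passing
 * num_descriptors == 1 to pqi_pci_unmap() below.
 */
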
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

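/*
 * Note for callers: on success the request's single SG descriptor has
 * been DMA-mapped by pqi_map_single(), so every successful
 * pqi_build_raid_path_request() call must be paired with
 * pqi_pci_unmap() once the request completes (see
 * pqi_send_scsi_raid_request() and pqi_identify_physical_device()).
 */
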
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

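/*
 * The allocation above is lock-free: each slot's refcount doubles as a
 * try-lock (atomic_inc_return() == 1 claims a free slot), and
 * next_io_request_slot is only a scan hint, so its unsynchronized
 * update is harmless ("benignly racy").
 */
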
static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA		(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

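/*
 * The LUN list is fetched in two passes: a header-only read to learn
 * the list length, then a full read into a buffer sized from that
 * length.  If the length reported by the full read has grown (devices
 * arrived in between), the buffer is reallocated and the read retried.
 */
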
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

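/*
 * Example decode: an internal volume whose little-endian lunid is
 * 0x00004005 maps to bus PQI_RAID_VOLUME_BUS, target 0 and
 * lun 0x0005 (lunid & 0x3fff); for an external RAID address the
 * target comes from bits 16-29 and the LUN from the low byte.
 */
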
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
		existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
		new_device->volume_status == CISS_LV_OK)
		existing_device->rescan = true;

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type =
		new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}

static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock. That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del_init(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (pqi_ctrl_in_ofa(ctrl_info))
		pqi_ctrl_ofa_done(ctrl_info);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		}
		list_del(&device->delete_list_entry);
		if (pqi_is_device_added(device)) {
			pqi_remove_device(ctrl_info, device);
		} else {
			if (!device->volume_offline)
				pqi_dev_info(ctrl_info, "removed", device);
			pqi_free_device(device);
		}
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev) {
			if (device->queue_depth !=
				device->advertised_queue_depth) {
				device->advertised_queue_depth = device->queue_depth;
				scsi_change_queue_depth(device->sdev,
					device->advertised_queue_depth);
			}
			if (device->rescan) {
				scsi_rescan_device(&device->sdev->sdev_gendev);
				device->rescan = false;
			}
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc == 0) {
				pqi_dev_info(ctrl_info, "added", device);
			} else {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}

static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	/*
	 * Only support the HBA controller itself as a RAID
	 * controller.  If it's a RAID controller other than
	 * the HBA itself (an external RAID controller, for
	 * example), we don't support it.
	 */
	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
		!pqi_is_hba_lunid(device->scsi3addr))
		return false;

	return true;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}

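/*
 * Setting the top two bits of byte 3 of the CISS LUN address marks the
 * device as masked, so MASKED_DEVICE() (and therefore pqi_skip_device())
 * will hide it; this is how the pqi_hide_vsep handling in
 * pqi_update_scsi_devices() suppresses the virtual SEP entry.
 */
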
static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	switch (device->device_type) {
	case SA_DEVICE_TYPE_SAS:
	case SA_DEVICE_TYPE_EXPANDER_SMP:
	case SA_DEVICE_TYPE_SES:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device ||
		!pqi_skip_device(device->scsi3addr);
}

static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	unsigned int physical_index;
	unsigned int logical_index;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}

		if (pqi_hide_vsep) {
			for (i = num_physicals - 1; i >= 0; i--) {
				phys_lun_ext_entry =
					&physdev_list->lun_entries[i];
				if (CISS_GET_DRIVE_NUMBER(
					phys_lun_ext_entry->lunid) ==
						PQI_VSEP_CISS_BTL) {
					pqi_mask_device(
						phys_lun_ext_entry->lunid);
					break;
				}
			}
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc_array(num_new_devices,
		sizeof(*new_device_list),
		GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;
	physical_index = 0;
	logical_index = 0;

	for (i = 0; i < num_new_devices; i++) {

		if ((!pqi_expose_ld_first && i < num_physicals) ||
			(pqi_expose_ld_first && i >= num_logicals)) {
			is_physical_device = true;
			phys_lun_ext_entry =
				&physdev_list->lun_entries[physical_index++];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[logical_index++];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (is_physical_device) {
			device->device_type = phys_lun_ext_entry->device_type;
			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
				device->is_expander_smp_device = true;
		} else {
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);
		}

		if (!pqi_is_supported_device(device))
			continue;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device, id_phys);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx\n",
					get_unaligned_be64(
						&phys_lun_ext_entry->wwid));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle) {
				device->aio_enabled = true;
				device->aio_handle =
					phys_lun_ext_entry->aio_handle;
			}
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		if (pqi_is_device_with_sas_address(device))
			device->sas_address = get_unaligned_be64(&device->wwid);

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}

static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
		pqi_schedule_rescan_worker_delayed(ctrl_info);
		rc = -EINPROGRESS;
	} else {
		rc = pqi_update_scsi_devices(ctrl_info);
		if (rc)
			pqi_schedule_rescan_worker_delayed(ctrl_info);
		mutex_unlock(&ctrl_info->scan_mutex);
	}

	return rc;
}

static void pqi_scan_start(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(shost);
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	pqi_scan_scsi_devices(ctrl_info);
}

/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->scan_mutex);
	mutex_unlock(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}

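/*
 * For example, with a 4096-byte volume block size, first_block 100
 * yields a tweak of (100 * 4096) / 512 = 800: the tweak always counts
 * 512-byte sectors, independent of the volume's logical block size.
 */
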
/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */

#define PQI_RAID_BYPASS_INELIGIBLE	1

static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct raid_map *raid_map;
	bool is_write = false;
	u32 map_index;
	u64 first_block;
	u64 last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row;
	u64 last_row;
	u32 first_row_offset;
	u32 last_row_offset;
	u32 first_column;
	u32 last_column;
	u64 r0_first_row;
	u64 r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row;
	u64 r5or6_last_row;
	u32 r5or6_first_row_offset;
	u32 r5or6_last_row_offset;
	u32 r5or6_first_column;
	u32 r5or6_last_column;
	u16 data_disks_per_row;
	u32 total_disks_per_row;
	u16 layout_map_count;
	u32 stripesize;
	u16 strip_size;
	u32 first_group;
	u32 last_group;
	u32 current_group;
	u32 map_row;
	u32 aio_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_length;
	int offload_to_mirror;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

2291 /* Check for valid opcode, get LBA and block count. */
2292 switch (scmd->cmnd[0]) {
2293 case WRITE_6:
2294 is_write = true;
2295 /* fall through */
2296 case READ_6:
2297 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2298 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2299 block_cnt = (u32)scmd->cmnd[4];
2300 if (block_cnt == 0)
2301 block_cnt = 256;
2302 break;
2303 case WRITE_10:
2304 is_write = true;
2305 /* fall through */
2306 case READ_10:
2307 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2308 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2309 break;
2310 case WRITE_12:
2311 is_write = true;
2312 /* fall through */
2313 case READ_12:
2314 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2315 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2316 break;
2317 case WRITE_16:
2318 is_write = true;
2319 /* fall through */
2320 case READ_16:
2321 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2322 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2323 break;
2324 default:
2325 /* Process via normal I/O path. */
2326 return PQI_RAID_BYPASS_INELIGIBLE;
2327 }
2329 /* Check for write to non-RAID-0. */
2330 if (is_write && device->raid_level != SA_RAID_0)
2331 return PQI_RAID_BYPASS_INELIGIBLE;
2333 if (unlikely(block_cnt == 0))
2334 return PQI_RAID_BYPASS_INELIGIBLE;
2336 last_block = first_block + block_cnt - 1;
2337 raid_map = device->raid_map;
2339 /* Check for invalid block or wraparound. */
2340 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2341 last_block < first_block)
2342 return PQI_RAID_BYPASS_INELIGIBLE;
2344 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2345 strip_size = get_unaligned_le16(&raid_map->strip_size);
2346 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2348 /* Calculate stripe information for the request. */
2349 blocks_per_row = data_disks_per_row * strip_size;
2350 #if BITS_PER_LONG == 32
2351 tmpdiv = first_block;
2352 do_div(tmpdiv, blocks_per_row);
2353 first_row = tmpdiv;
2354 tmpdiv = last_block;
2355 do_div(tmpdiv, blocks_per_row);
2356 last_row = tmpdiv;
2357 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2358 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2359 tmpdiv = first_row_offset;
2360 do_div(tmpdiv, strip_size);
2361 first_column = tmpdiv;
2362 tmpdiv = last_row_offset;
2363 do_div(tmpdiv, strip_size);
2364 last_column = tmpdiv;
2365 #else
2366 first_row = first_block / blocks_per_row;
2367 last_row = last_block / blocks_per_row;
2368 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2369 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2370 first_column = first_row_offset / strip_size;
2371 last_column = last_row_offset / strip_size;
2372 #endif
2374 /* If this isn't a single row/column then give to the controller. */
2375 if (first_row != last_row || first_column != last_column)
2376 return PQI_RAID_BYPASS_INELIGIBLE;
2378 /* Proceeding with driver mapping. */
2379 total_disks_per_row = data_disks_per_row +
2380 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2381 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2382 get_unaligned_le16(&raid_map->row_cnt);
2383 map_index = (map_row * total_disks_per_row) + first_column;
2386 if (device->raid_level == SA_RAID_1) {
2387 if (device->offload_to_mirror)
2388 map_index += data_disks_per_row;
2389 device->offload_to_mirror = !device->offload_to_mirror;
2390 } else if (device->raid_level == SA_RAID_ADM) {
2393 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2394 * divisible by 3.
2395 */
2396 offload_to_mirror = device->offload_to_mirror;
2397 if (offload_to_mirror == 0) {
2398 /* use physical disk in the first mirrored group. */
2399 map_index %= data_disks_per_row;
2400 } else {
2401 do {
2402 /*
2403 * Determine mirror group that map_index
2404 * indicates.
2405 */
2406 current_group = map_index / data_disks_per_row;
2408 if (offload_to_mirror != current_group) {
2409 if (current_group <
2410 layout_map_count - 1) {
2411 /*
2412 * Select raid index from
2413 * next group.
2414 */
2415 map_index += data_disks_per_row;
2416 current_group++;
2417 } else {
2418 /*
2419 * Select raid index from first
2420 * group.
2421 */
2422 map_index %= data_disks_per_row;
2423 current_group = 0;
2424 }
2425 }
2426 } while (offload_to_mirror != current_group);
2427 }
2429 /* Set mirror group to use next time. */
2430 offload_to_mirror =
2431 (offload_to_mirror >= layout_map_count - 1) ?
2432 0 : offload_to_mirror + 1;
2433 device->offload_to_mirror = offload_to_mirror;
2434 /*
2435 * Avoid direct use of device->offload_to_mirror within this
2436 * function since multiple threads might simultaneously
2437 * increment it beyond the range of device->layout_map_count - 1.
2438 */
2439 } else if ((device->raid_level == SA_RAID_5 ||
2440 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2442 /* Verify first and last block are in same RAID group */
2443 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2444 stripesize = r5or6_blocks_per_row * layout_map_count;
2445 #if BITS_PER_LONG == 32
2446 tmpdiv = first_block;
2447 first_group = do_div(tmpdiv, stripesize);
2448 tmpdiv = first_group;
2449 do_div(tmpdiv, r5or6_blocks_per_row);
2450 first_group = tmpdiv;
2451 tmpdiv = last_block;
2452 last_group = do_div(tmpdiv, stripesize);
2453 tmpdiv = last_group;
2454 do_div(tmpdiv, r5or6_blocks_per_row);
2455 last_group = tmpdiv;
2456 #else
2457 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2458 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2459 #endif
2460 if (first_group != last_group)
2461 return PQI_RAID_BYPASS_INELIGIBLE;
2463 /* Verify request is in a single row of RAID 5/6 */
2464 #if BITS_PER_LONG == 32
2465 tmpdiv = first_block;
2466 do_div(tmpdiv, stripesize);
2467 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2468 tmpdiv = last_block;
2469 do_div(tmpdiv, stripesize);
2470 r5or6_last_row = r0_last_row = tmpdiv;
2471 #else
2472 first_row = r5or6_first_row = r0_first_row =
2473 first_block / stripesize;
2474 r5or6_last_row = r0_last_row = last_block / stripesize;
2475 #endif
2476 if (r5or6_first_row != r5or6_last_row)
2477 return PQI_RAID_BYPASS_INELIGIBLE;
2479 /* Verify request is in a single column */
2480 #if BITS_PER_LONG == 32
2481 tmpdiv = first_block;
2482 first_row_offset = do_div(tmpdiv, stripesize);
2483 tmpdiv = first_row_offset;
2484 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2485 r5or6_first_row_offset = first_row_offset;
2486 tmpdiv = last_block;
2487 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2488 tmpdiv = r5or6_last_row_offset;
2489 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2490 tmpdiv = r5or6_first_row_offset;
2491 do_div(tmpdiv, strip_size);
2492 first_column = r5or6_first_column = tmpdiv;
2493 tmpdiv = r5or6_last_row_offset;
2494 do_div(tmpdiv, strip_size);
2495 r5or6_last_column = tmpdiv;
2496 #else
2497 first_row_offset = r5or6_first_row_offset =
2498 (u32)((first_block % stripesize) %
2499 r5or6_blocks_per_row);
2501 r5or6_last_row_offset =
2502 (u32)((last_block % stripesize) %
2503 r5or6_blocks_per_row);
2505 first_column = r5or6_first_row_offset / strip_size;
2506 r5or6_first_column = first_column;
2507 r5or6_last_column = r5or6_last_row_offset / strip_size;
2508 #endif
2509 if (r5or6_first_column != r5or6_last_column)
2510 return PQI_RAID_BYPASS_INELIGIBLE;
2512 /* Request is eligible */
2513 map_row =
2514 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2515 get_unaligned_le16(&raid_map->row_cnt);
2517 map_index = (first_group *
2518 (get_unaligned_le16(&raid_map->row_cnt) *
2519 total_disks_per_row)) +
2520 (map_row * total_disks_per_row) + first_column;
2521 }
2523 aio_handle = raid_map->disk_data[map_index].aio_handle;
2524 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2525 first_row * strip_size +
2526 (first_row_offset - first_column * strip_size);
2527 disk_block_cnt = block_cnt;
2529 /* Handle differing logical/physical block sizes. */
2530 if (raid_map->phys_blk_shift) {
2531 disk_block <<= raid_map->phys_blk_shift;
2532 disk_block_cnt <<= raid_map->phys_blk_shift;
2533 }
2535 if (unlikely(disk_block_cnt > 0xffff))
2536 return PQI_RAID_BYPASS_INELIGIBLE;
2538 /* Build the new CDB for the physical disk I/O. */
2539 if (disk_block > 0xffffffff) {
2540 cdb[0] = is_write ? WRITE_16 : READ_16;
2541 cdb[1] = 0;
2542 put_unaligned_be64(disk_block, &cdb[2]);
2543 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2544 cdb[14] = 0;
2545 cdb[15] = 0;
2546 cdb_length = 16;
2547 } else {
2548 cdb[0] = is_write ? WRITE_10 : READ_10;
2549 cdb[1] = 0;
2550 put_unaligned_be32((u32)disk_block, &cdb[2]);
2551 cdb[6] = 0;
2552 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2553 cdb[8] = 0;
2554 cdb[9] = 0;
2555 cdb_length = 10;
2556 }
2557 if (get_unaligned_le16(&raid_map->flags) &
2558 RAID_MAP_ENCRYPTION_ENABLED) {
2559 pqi_set_encryption_info(&encryption_info, raid_map,
2560 first_block);
2561 encryption_info_ptr = &encryption_info;
2562 } else {
2563 encryption_info_ptr = NULL;
2564 }
2566 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2567 cdb, cdb_length, queue_group, encryption_info_ptr, true);
2568 }
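/*
 * Editor sketch (hypothetical helper, not in the original driver): the
 * geometry test above reduces to integer division once blocks_per_row is
 * known. With strip_size = 128 and data_disks_per_row = 4 (512 blocks per
 * row), LBA 1000 lands in row 1, column 3. Plain 64-bit division is used
 * here; the driver itself must use do_div() on 32-bit kernels.
 */
static inline void example_raid0_map(u64 lba, u16 strip_size,
	u16 data_disks_per_row, u64 *row, u32 *column)
{
	u32 blocks_per_row = (u32)strip_size * data_disks_per_row;
	u32 row_offset;

	*row = lba / blocks_per_row;
	row_offset = (u32)(lba - (*row * blocks_per_row));
	*column = row_offset / strip_size;
}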
2570 #define PQI_STATUS_IDLE 0x0
2572 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2573 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2575 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2576 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2577 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2578 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2579 #define PQI_DEVICE_STATE_ERROR 0x4
2581 #define PQI_MODE_READY_TIMEOUT_SECS 30
2582 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2584 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2586 struct pqi_device_registers __iomem *pqi_registers;
2587 unsigned long timeout;
2591 pqi_registers = ctrl_info->pqi_registers;
2592 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2594 while (1) {
2595 signature = readq(&pqi_registers->signature);
2596 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2597 sizeof(signature)) == 0)
2598 break;
2599 if (time_after(jiffies, timeout)) {
2600 dev_err(&ctrl_info->pci_dev->dev,
2601 "timed out waiting for PQI signature\n");
2602 return -ETIMEDOUT;
2603 }
2604 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2605 }
2607 while (1) {
2608 status = readb(&pqi_registers->function_and_status_code);
2609 if (status == PQI_STATUS_IDLE)
2610 break;
2611 if (time_after(jiffies, timeout)) {
2612 dev_err(&ctrl_info->pci_dev->dev,
2613 "timed out waiting for PQI IDLE\n");
2614 return -ETIMEDOUT;
2615 }
2616 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2617 }
2619 while (1) {
2620 if (readl(&pqi_registers->device_status) ==
2621 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2622 break;
2623 if (time_after(jiffies, timeout)) {
2624 dev_err(&ctrl_info->pci_dev->dev,
2625 "timed out waiting for PQI all registers ready\n");
2626 return -ETIMEDOUT;
2627 }
2628 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2629 }
2631 return 0;
2632 }
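/*
 * Editor sketch: the three loops above share one poll-until-timeout shape.
 * A generic form (hypothetical helper; the condition is supplied as a
 * callback) would look like this:
 */
static inline int example_poll_until(struct pqi_ctrl_info *ctrl_info,
	bool (*condition_met)(struct pqi_ctrl_info *ctrl_info),
	unsigned long timeout)
{
	while (!condition_met(ctrl_info)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}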
2634 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2636 struct pqi_scsi_dev *device;
2638 device = io_request->scmd->device->hostdata;
2639 device->raid_bypass_enabled = false;
2640 device->aio_enabled = false;
2643 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2645 struct pqi_ctrl_info *ctrl_info;
2646 struct pqi_scsi_dev *device;
2648 device = sdev->hostdata;
2649 if (device->device_offline)
2650 return;
2652 device->device_offline = true;
2653 ctrl_info = shost_to_hba(sdev->host);
2654 pqi_schedule_rescan_worker(ctrl_info);
2655 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2656 path, ctrl_info->scsi_host->host_no, device->bus,
2657 device->target, device->lun);
2660 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2664 struct scsi_cmnd *scmd;
2665 struct pqi_raid_error_info *error_info;
2666 size_t sense_data_length;
2669 struct scsi_sense_hdr sshdr;
2671 scmd = io_request->scmd;
2675 error_info = io_request->error_info;
2676 scsi_status = error_info->status;
2679 switch (error_info->data_out_result) {
2680 case PQI_DATA_IN_OUT_GOOD:
2681 break;
2682 case PQI_DATA_IN_OUT_UNDERFLOW:
2683 xfer_count =
2684 get_unaligned_le32(&error_info->data_out_transferred);
2685 residual_count = scsi_bufflen(scmd) - xfer_count;
2686 scsi_set_resid(scmd, residual_count);
2687 if (xfer_count < scmd->underflow)
2688 host_byte = DID_SOFT_ERROR;
2689 break;
2690 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2691 case PQI_DATA_IN_OUT_ABORTED:
2692 host_byte = DID_ABORT;
2693 break;
2694 case PQI_DATA_IN_OUT_TIMEOUT:
2695 host_byte = DID_TIME_OUT;
2696 break;
2697 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2698 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2699 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2700 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2701 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2702 case PQI_DATA_IN_OUT_ERROR:
2703 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2704 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2705 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2706 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2707 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2708 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2709 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2710 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2711 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2712 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2713 default:
2714 host_byte = DID_ERROR;
2715 break;
2716 }
2718 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2719 if (sense_data_length == 0)
2720 sense_data_length =
2721 get_unaligned_le16(&error_info->response_data_length);
2722 if (sense_data_length) {
2723 if (sense_data_length > sizeof(error_info->data))
2724 sense_data_length = sizeof(error_info->data);
2726 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2727 scsi_normalize_sense(error_info->data,
2728 sense_data_length, &sshdr) &&
2729 sshdr.sense_key == HARDWARE_ERROR &&
2730 sshdr.asc == 0x3e) {
2731 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2732 struct pqi_scsi_dev *device = scmd->device->hostdata;
2734 switch (sshdr.ascq) {
2735 case 0x1: /* LOGICAL UNIT FAILURE */
2736 if (printk_ratelimit())
2737 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2738 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2739 pqi_take_device_offline(scmd->device, "RAID");
2740 host_byte = DID_NO_CONNECT;
2741 break;
2743 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
2744 if (printk_ratelimit())
2745 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2746 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2747 break;
2748 }
2749 }
2751 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2752 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2753 memcpy(scmd->sense_buffer, error_info->data,
2754 sense_data_length);
2755 }
2757 scmd->result = scsi_status;
2758 set_host_byte(scmd, host_byte);
2761 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2765 struct scsi_cmnd *scmd;
2766 struct pqi_aio_error_info *error_info;
2767 size_t sense_data_length;
2770 bool device_offline;
2772 scmd = io_request->scmd;
2773 error_info = io_request->error_info;
2775 sense_data_length = 0;
2776 device_offline = false;
2778 switch (error_info->service_response) {
2779 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2780 scsi_status = error_info->status;
2781 break;
2782 case PQI_AIO_SERV_RESPONSE_FAILURE:
2783 switch (error_info->status) {
2784 case PQI_AIO_STATUS_IO_ABORTED:
2785 scsi_status = SAM_STAT_TASK_ABORTED;
2786 break;
2787 case PQI_AIO_STATUS_UNDERRUN:
2788 scsi_status = SAM_STAT_GOOD;
2789 residual_count = get_unaligned_le32(
2790 &error_info->residual_count);
2791 scsi_set_resid(scmd, residual_count);
2792 xfer_count = scsi_bufflen(scmd) - residual_count;
2793 if (xfer_count < scmd->underflow)
2794 host_byte = DID_SOFT_ERROR;
2795 break;
2796 case PQI_AIO_STATUS_OVERRUN:
2797 scsi_status = SAM_STAT_GOOD;
2798 break;
2799 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2800 pqi_aio_path_disabled(io_request);
2801 scsi_status = SAM_STAT_GOOD;
2802 io_request->status = -EAGAIN;
2803 break;
2804 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2805 case PQI_AIO_STATUS_INVALID_DEVICE:
2806 if (!io_request->raid_bypass) {
2807 device_offline = true;
2808 pqi_take_device_offline(scmd->device, "AIO");
2809 host_byte = DID_NO_CONNECT;
2810 }
2811 scsi_status = SAM_STAT_CHECK_CONDITION;
2812 break;
2813 case PQI_AIO_STATUS_IO_ERROR:
2814 default:
2815 scsi_status = SAM_STAT_CHECK_CONDITION;
2816 break;
2817 }
2818 break;
2819 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2820 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2821 scsi_status = SAM_STAT_GOOD;
2822 break;
2823 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2824 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2825 default:
2826 scsi_status = SAM_STAT_CHECK_CONDITION;
2827 break;
2828 }
2830 if (error_info->data_present) {
2831 sense_data_length =
2832 get_unaligned_le16(&error_info->data_length);
2833 if (sense_data_length) {
2834 if (sense_data_length > sizeof(error_info->data))
2835 sense_data_length = sizeof(error_info->data);
2836 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2837 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2838 memcpy(scmd->sense_buffer, error_info->data,
2839 sense_data_length);
2840 }
2841 }
2843 if (device_offline && sense_data_length == 0)
2844 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2845 0x3e, 0x1);
2847 scmd->result = scsi_status;
2848 set_host_byte(scmd, host_byte);
2851 static void pqi_process_io_error(unsigned int iu_type,
2852 struct pqi_io_request *io_request)
2855 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2856 pqi_process_raid_io_error(io_request);
2858 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2859 pqi_process_aio_io_error(io_request);
2864 static int pqi_interpret_task_management_response(
2865 struct pqi_task_management_response *response)
2866 {
2867 int rc;
2869 switch (response->response_code) {
2870 case SOP_TMF_COMPLETE:
2871 case SOP_TMF_FUNCTION_SUCCEEDED:
2872 rc = 0;
2873 break;
2874 case SOP_TMF_REJECTED:
2875 rc = -EAGAIN;
2876 break;
2877 default:
2878 rc = -EIO;
2879 break;
2880 }
2882 return rc;
2883 }
2885 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
2887 pqi_take_ctrl_offline(ctrl_info);
2890 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
2895 struct pqi_io_request *io_request;
2896 struct pqi_io_response *response;
2900 oq_ci = queue_group->oq_ci_copy;
2902 while (1) {
2903 oq_pi = readl(queue_group->oq_pi);
2904 if (oq_pi >= ctrl_info->num_elements_per_oq) {
2905 pqi_invalid_response(ctrl_info);
2906 dev_err(&ctrl_info->pci_dev->dev,
2907 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
2908 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
2909 return -1;
2910 }
2911 if (oq_pi == oq_ci)
2912 break;
2914 num_responses++;
2915 response = queue_group->oq_element_array +
2916 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2918 request_id = get_unaligned_le16(&response->request_id);
2919 if (request_id >= ctrl_info->max_io_slots) {
2920 pqi_invalid_response(ctrl_info);
2921 dev_err(&ctrl_info->pci_dev->dev,
2922 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
2923 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
2924 return -1;
2925 }
2927 io_request = &ctrl_info->io_request_pool[request_id];
2928 if (atomic_read(&io_request->refcount) == 0) {
2929 pqi_invalid_response(ctrl_info);
2930 dev_err(&ctrl_info->pci_dev->dev,
2931 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
2932 request_id, oq_pi, oq_ci);
2933 return -1;
2934 }
2936 switch (response->header.iu_type) {
2937 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2938 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2939 if (io_request->scmd)
2940 io_request->scmd->result = 0;
2941 break;
2942 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2943 break;
2944 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2945 io_request->status =
2946 get_unaligned_le16(
2947 &((struct pqi_vendor_general_response *)
2948 response)->status);
2949 break;
2950 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2951 io_request->status =
2952 pqi_interpret_task_management_response(
2953 (void *)response);
2954 break;
2955 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2956 pqi_aio_path_disabled(io_request);
2957 io_request->status = -EAGAIN;
2958 break;
2959 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2960 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2961 io_request->error_info = ctrl_info->error_buffer +
2962 (get_unaligned_le16(&response->error_index) *
2963 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2964 pqi_process_io_error(response->header.iu_type, io_request);
2965 break;
2966 default:
2967 pqi_invalid_response(ctrl_info);
2968 dev_err(&ctrl_info->pci_dev->dev,
2969 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
2970 response->header.iu_type, oq_pi, oq_ci);
2971 return -1;
2972 }
2974 io_request->io_complete_callback(io_request, io_request->context);
2977 * Note that the I/O request structure CANNOT BE TOUCHED after
2978 * returning from the I/O completion callback!
2980 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2981 }
2983 if (num_responses) {
2984 queue_group->oq_ci_copy = oq_ci;
2985 writel(oq_ci, queue_group->oq_ci);
2986 }
2988 return num_responses;
2991 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2992 unsigned int ci, unsigned int elements_in_queue)
2994 unsigned int num_elements_used;
2997 num_elements_used = pi - ci;
2999 num_elements_used = elements_in_queue - ci + pi;
3001 return elements_in_queue - num_elements_used - 1;
3002 }
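/*
 * Editor sketch (not in the original source): exercises the accounting
 * above. With a 16-element ring, pi = 3 and ci = 10 is the wrapped case:
 * 16 - 10 + 3 = 9 elements are in use, so 6 are free. One slot is always
 * sacrificed so that pi == ci unambiguously means "ring empty".
 */
static inline void example_ring_accounting_check(void)
{
	BUG_ON(pqi_num_elements_free(3, 10, 16) != 6);	/* wrapped case */
	BUG_ON(pqi_num_elements_free(10, 3, 16) != 8);	/* linear case */
}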
3004 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3005 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3009 unsigned long flags;
3011 struct pqi_queue_group *queue_group;
3013 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3014 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3016 while (1) {
3017 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3019 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3020 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3022 if (pqi_num_elements_free(iq_pi, iq_ci,
3023 ctrl_info->num_elements_per_iq))
3024 break;
3026 spin_unlock_irqrestore(
3027 &queue_group->submit_lock[RAID_PATH], flags);
3029 if (pqi_ctrl_offline(ctrl_info))
3030 return;
3031 }
3033 next_element = queue_group->iq_element_array[RAID_PATH] +
3034 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3036 memcpy(next_element, iu, iu_length);
3038 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3039 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3042 * This write notifies the controller that an IU is available to be
3045 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3047 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3050 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3051 struct pqi_event *event)
3053 struct pqi_event_acknowledge_request request;
3055 memset(&request, 0, sizeof(request));
3057 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3058 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3059 &request.header.iu_length);
3060 request.event_type = event->event_type;
3061 request.event_id = event->event_id;
3062 request.additional_event_id = event->additional_event_id;
3064 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3067 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3068 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3070 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3071 struct pqi_ctrl_info *ctrl_info)
3073 unsigned long timeout;
3076 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3079 status = pqi_read_soft_reset_status(ctrl_info);
3080 if (status & PQI_SOFT_RESET_INITIATE)
3081 return RESET_INITIATE_DRIVER;
3083 if (status & PQI_SOFT_RESET_ABORT)
3084 return RESET_ABORT;
3086 if (time_after(jiffies, timeout)) {
3087 dev_err(&ctrl_info->pci_dev->dev,
3088 "timed out waiting for soft reset status\n");
3089 return RESET_TIMEDOUT;
3092 if (!sis_is_firmware_running(ctrl_info))
3093 return RESET_NORESPONSE;
3095 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3099 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3100 enum pqi_soft_reset_status reset_status)
3104 switch (reset_status) {
3105 case RESET_INITIATE_DRIVER:
3106 case RESET_TIMEDOUT:
3107 dev_info(&ctrl_info->pci_dev->dev,
3108 "resetting controller %u\n", ctrl_info->ctrl_id);
3109 sis_soft_reset(ctrl_info);
3110 /* fall through */
3111 case RESET_INITIATE_FIRMWARE:
3112 rc = pqi_ofa_ctrl_restart(ctrl_info);
3113 pqi_ofa_free_host_buffer(ctrl_info);
3114 dev_info(&ctrl_info->pci_dev->dev,
3115 "Online Firmware Activation for controller %u: %s\n",
3116 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3117 break;
3118 case RESET_ABORT:
3119 pqi_ofa_ctrl_unquiesce(ctrl_info);
3120 dev_info(&ctrl_info->pci_dev->dev,
3121 "Online Firmware Activation for controller %u: %s\n",
3122 ctrl_info->ctrl_id, "ABORTED");
3123 break;
3124 case RESET_NORESPONSE:
3125 pqi_ofa_free_host_buffer(ctrl_info);
3126 pqi_take_ctrl_offline(ctrl_info);
3127 break;
3128 }
3129 }
3131 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3132 struct pqi_event *event)
3135 enum pqi_soft_reset_status status;
3137 event_id = get_unaligned_le16(&event->event_id);
3139 mutex_lock(&ctrl_info->ofa_mutex);
3141 if (event_id == PQI_EVENT_OFA_QUIESCE) {
3142 dev_info(&ctrl_info->pci_dev->dev,
3143 "Received Online Firmware Activation quiesce event for controller %u\n",
3144 ctrl_info->ctrl_id);
3145 pqi_ofa_ctrl_quiesce(ctrl_info);
3146 pqi_acknowledge_event(ctrl_info, event);
3147 if (ctrl_info->soft_reset_handshake_supported) {
3148 status = pqi_poll_for_soft_reset_status(ctrl_info);
3149 pqi_process_soft_reset(ctrl_info, status);
3151 pqi_process_soft_reset(ctrl_info,
3152 RESET_INITIATE_FIRMWARE);
3155 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3156 pqi_acknowledge_event(ctrl_info, event);
3157 pqi_ofa_setup_host_buffer(ctrl_info,
3158 le32_to_cpu(event->ofa_bytes_requested));
3159 pqi_ofa_host_memory_update(ctrl_info);
3160 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3161 pqi_ofa_free_host_buffer(ctrl_info);
3162 pqi_acknowledge_event(ctrl_info, event);
3163 dev_info(&ctrl_info->pci_dev->dev,
3164 "Online Firmware Activation(%u) cancel reason : %u\n",
3165 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3168 mutex_unlock(&ctrl_info->ofa_mutex);
3171 static void pqi_event_worker(struct work_struct *work)
3174 struct pqi_ctrl_info *ctrl_info;
3175 struct pqi_event *event;
3177 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3179 pqi_ctrl_busy(ctrl_info);
3180 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
3181 if (pqi_ctrl_offline(ctrl_info))
3182 goto out;
3184 pqi_schedule_rescan_worker_delayed(ctrl_info);
3186 event = ctrl_info->events;
3187 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3188 if (event->pending) {
3189 event->pending = false;
3190 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3191 pqi_ctrl_unbusy(ctrl_info);
3192 pqi_ofa_process_event(ctrl_info, event);
3193 return;
3194 }
3195 pqi_acknowledge_event(ctrl_info, event);
3196 }
3197 event++;
3198 }
3200 out:
3201 pqi_ctrl_unbusy(ctrl_info);
3204 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
3206 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3209 u32 heartbeat_count;
3210 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3211 heartbeat_timer);
3213 pqi_check_ctrl_health(ctrl_info);
3214 if (pqi_ctrl_offline(ctrl_info))
3215 return;
3217 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3218 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3220 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3221 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3222 dev_err(&ctrl_info->pci_dev->dev,
3223 "no heartbeat detected - last heartbeat count: %u\n",
3225 pqi_take_ctrl_offline(ctrl_info);
3229 ctrl_info->previous_num_interrupts = num_interrupts;
3232 ctrl_info->previous_heartbeat_count = heartbeat_count;
3233 mod_timer(&ctrl_info->heartbeat_timer,
3234 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3235 }
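/*
 * Editor sketch (hypothetical helper): the lockup test above as a pure
 * predicate. The controller is declared dead only when *both* interrupt
 * activity and the firmware heartbeat counter have stalled across one
 * PQI_HEARTBEAT_TIMER_INTERVAL period.
 */
static inline bool example_ctrl_appears_hung(unsigned int prev_num_interrupts,
	unsigned int num_interrupts, u32 prev_heartbeat_count,
	u32 heartbeat_count)
{
	return num_interrupts == prev_num_interrupts &&
		heartbeat_count == prev_heartbeat_count;
}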
3237 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3239 if (!ctrl_info->heartbeat_counter)
3240 return;
3242 ctrl_info->previous_num_interrupts =
3243 atomic_read(&ctrl_info->num_interrupts);
3244 ctrl_info->previous_heartbeat_count =
3245 pqi_read_heartbeat_counter(ctrl_info);
3247 ctrl_info->heartbeat_timer.expires =
3248 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3249 add_timer(&ctrl_info->heartbeat_timer);
3252 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3254 del_timer_sync(&ctrl_info->heartbeat_timer);
3257 static inline int pqi_event_type_to_event_index(unsigned int event_type)
3261 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3262 if (event_type == pqi_supported_event_types[index])
3263 return index;
3265 return -1;
3268 static inline bool pqi_is_supported_event(unsigned int event_type)
3270 return pqi_event_type_to_event_index(event_type) != -1;
3271 }
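/*
 * Editor sketch (hypothetical helper): the index returned above is a
 * position in pqi_supported_event_types[], and ctrl_info->events[] is
 * kept parallel to that table, so a type-to-slot lookup is simply:
 */
static inline struct pqi_event *example_event_slot(
	struct pqi_ctrl_info *ctrl_info, unsigned int event_type)
{
	int index = pqi_event_type_to_event_index(event_type);

	return index < 0 ? NULL : &ctrl_info->events[index];
}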
3273 static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3274 struct pqi_event_response *response)
3278 event_id = get_unaligned_le16(&event->event_id);
3280 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3281 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3282 event->ofa_bytes_requested =
3283 response->data.ofa_memory_allocation.bytes_requested;
3284 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3285 event->ofa_cancel_reason =
3286 response->data.ofa_cancelled.reason;
3291 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3296 struct pqi_event_queue *event_queue;
3297 struct pqi_event_response *response;
3298 struct pqi_event *event;
3301 event_queue = &ctrl_info->event_queue;
3303 oq_ci = event_queue->oq_ci_copy;
3305 while (1) {
3306 oq_pi = readl(event_queue->oq_pi);
3307 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3308 pqi_invalid_response(ctrl_info);
3309 dev_err(&ctrl_info->pci_dev->dev,
3310 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3311 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3312 return -1;
3313 }
3315 if (oq_pi == oq_ci)
3316 break;
3318 num_events++;
3319 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3322 pqi_event_type_to_event_index(response->event_type);
3324 if (event_index >= 0 && response->request_acknowledge) {
3325 event = &ctrl_info->events[event_index];
3326 event->pending = true;
3327 event->event_type = response->event_type;
3328 event->event_id = response->event_id;
3329 event->additional_event_id = response->additional_event_id;
3330 if (event->event_type == PQI_EVENT_TYPE_OFA)
3331 pqi_ofa_capture_event_payload(event, response);
3334 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3335 }
3337 if (num_events) {
3338 event_queue->oq_ci_copy = oq_ci;
3339 writel(oq_ci, event_queue->oq_ci);
3340 schedule_work(&ctrl_info->event_work);
3341 }
3343 return num_events;
3344 }
3346 #define PQI_LEGACY_INTX_MASK 0x1
3348 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3352 struct pqi_device_registers __iomem *pqi_registers;
3353 volatile void __iomem *register_addr;
3355 pqi_registers = ctrl_info->pqi_registers;
3358 register_addr = &pqi_registers->legacy_intx_mask_clear;
3360 register_addr = &pqi_registers->legacy_intx_mask_set;
3362 intx_mask = readl(register_addr);
3363 intx_mask |= PQI_LEGACY_INTX_MASK;
3364 writel(intx_mask, register_addr);
3367 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3368 enum pqi_irq_mode new_mode)
3370 switch (ctrl_info->irq_mode) {
3371 case IRQ_MODE_MSIX:
3372 switch (new_mode) {
3373 case IRQ_MODE_MSIX:
3374 break;
3375 case IRQ_MODE_INTX:
3376 pqi_configure_legacy_intx(ctrl_info, true);
3377 sis_enable_intx(ctrl_info);
3378 break;
3379 case IRQ_MODE_NONE:
3380 break;
3381 }
3382 break;
3383 case IRQ_MODE_INTX:
3384 switch (new_mode) {
3385 case IRQ_MODE_MSIX:
3386 pqi_configure_legacy_intx(ctrl_info, false);
3387 sis_enable_msix(ctrl_info);
3388 break;
3389 case IRQ_MODE_INTX:
3390 break;
3391 case IRQ_MODE_NONE:
3392 pqi_configure_legacy_intx(ctrl_info, false);
3393 break;
3394 }
3395 break;
3396 case IRQ_MODE_NONE:
3397 switch (new_mode) {
3398 case IRQ_MODE_MSIX:
3399 sis_enable_msix(ctrl_info);
3400 break;
3401 case IRQ_MODE_INTX:
3402 pqi_configure_legacy_intx(ctrl_info, true);
3403 sis_enable_intx(ctrl_info);
3404 break;
3405 case IRQ_MODE_NONE:
3406 break;
3407 }
3408 break;
3409 }
3411 ctrl_info->irq_mode = new_mode;
3412 }
3414 #define PQI_LEGACY_INTX_PENDING 0x1
3416 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3421 switch (ctrl_info->irq_mode) {
3422 case IRQ_MODE_MSIX:
3423 valid_irq = true;
3424 break;
3425 case IRQ_MODE_INTX:
3426 intx_status =
3427 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3428 if (intx_status & PQI_LEGACY_INTX_PENDING)
3429 valid_irq = true;
3430 else
3431 valid_irq = false;
3432 break;
3433 case IRQ_MODE_NONE:
3434 default:
3435 valid_irq = false;
3436 break;
3437 }
3439 return valid_irq;
3440 }
3442 static irqreturn_t pqi_irq_handler(int irq, void *data)
3444 struct pqi_ctrl_info *ctrl_info;
3445 struct pqi_queue_group *queue_group;
3446 int num_io_responses_handled;
3447 int num_events_handled;
3449 queue_group = data;
3450 ctrl_info = queue_group->ctrl_info;
3452 if (!pqi_is_valid_irq(ctrl_info))
3453 return IRQ_NONE;
3455 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3456 if (num_io_responses_handled < 0)
3457 goto out;
3459 if (irq == ctrl_info->event_irq) {
3460 num_events_handled = pqi_process_event_intr(ctrl_info);
3461 if (num_events_handled < 0)
3462 goto out;
3463 } else {
3464 num_events_handled = 0;
3465 }
3467 if (num_io_responses_handled + num_events_handled > 0)
3468 atomic_inc(&ctrl_info->num_interrupts);
3470 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3471 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3473 out:
3474 return IRQ_HANDLED;
3475 }
3477 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3479 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3483 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3485 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3486 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3487 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3489 dev_err(&pci_dev->dev,
3490 "irq %u init failed with error %d\n",
3491 pci_irq_vector(pci_dev, i), rc);
3492 return rc;
3493 }
3494 ctrl_info->num_msix_vectors_initialized++;
3500 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3504 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3505 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3506 &ctrl_info->queue_groups[i]);
3508 ctrl_info->num_msix_vectors_initialized = 0;
3511 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3513 int num_vectors_enabled;
3515 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3516 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3517 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3518 if (num_vectors_enabled < 0) {
3519 dev_err(&ctrl_info->pci_dev->dev,
3520 "MSI-X init failed with error %d\n",
3521 num_vectors_enabled);
3522 return num_vectors_enabled;
3525 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3526 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3530 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3532 if (ctrl_info->num_msix_vectors_enabled) {
3533 pci_free_irq_vectors(ctrl_info->pci_dev);
3534 ctrl_info->num_msix_vectors_enabled = 0;
3538 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3541 size_t alloc_length;
3542 size_t element_array_length_per_iq;
3543 size_t element_array_length_per_oq;
3544 void *element_array;
3545 void __iomem *next_queue_index;
3546 void *aligned_pointer;
3547 unsigned int num_inbound_queues;
3548 unsigned int num_outbound_queues;
3549 unsigned int num_queue_indexes;
3550 struct pqi_queue_group *queue_group;
3552 element_array_length_per_iq =
3553 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3554 ctrl_info->num_elements_per_iq;
3555 element_array_length_per_oq =
3556 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3557 ctrl_info->num_elements_per_oq;
3558 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3559 num_outbound_queues = ctrl_info->num_queue_groups;
3560 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3562 aligned_pointer = NULL;
3564 for (i = 0; i < num_inbound_queues; i++) {
3565 aligned_pointer = PTR_ALIGN(aligned_pointer,
3566 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3567 aligned_pointer += element_array_length_per_iq;
3570 for (i = 0; i < num_outbound_queues; i++) {
3571 aligned_pointer = PTR_ALIGN(aligned_pointer,
3572 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3573 aligned_pointer += element_array_length_per_oq;
3576 aligned_pointer = PTR_ALIGN(aligned_pointer,
3577 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3578 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3579 PQI_EVENT_OQ_ELEMENT_LENGTH;
3581 for (i = 0; i < num_queue_indexes; i++) {
3582 aligned_pointer = PTR_ALIGN(aligned_pointer,
3583 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3584 aligned_pointer += sizeof(pqi_index_t);
3587 alloc_length = (size_t)aligned_pointer +
3588 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3590 alloc_length += PQI_EXTRA_SGL_MEMORY;
3592 ctrl_info->queue_memory_base =
3593 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3594 &ctrl_info->queue_memory_base_dma_handle,
3595 GFP_KERNEL);
3597 if (!ctrl_info->queue_memory_base)
3598 return -ENOMEM;
3600 ctrl_info->queue_memory_length = alloc_length;
3602 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3603 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3605 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3606 queue_group = &ctrl_info->queue_groups[i];
3607 queue_group->iq_element_array[RAID_PATH] = element_array;
3608 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3609 ctrl_info->queue_memory_base_dma_handle +
3610 (element_array - ctrl_info->queue_memory_base);
3611 element_array += element_array_length_per_iq;
3612 element_array = PTR_ALIGN(element_array,
3613 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3614 queue_group->iq_element_array[AIO_PATH] = element_array;
3615 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3616 ctrl_info->queue_memory_base_dma_handle +
3617 (element_array - ctrl_info->queue_memory_base);
3618 element_array += element_array_length_per_iq;
3619 element_array = PTR_ALIGN(element_array,
3620 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3623 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3624 queue_group = &ctrl_info->queue_groups[i];
3625 queue_group->oq_element_array = element_array;
3626 queue_group->oq_element_array_bus_addr =
3627 ctrl_info->queue_memory_base_dma_handle +
3628 (element_array - ctrl_info->queue_memory_base);
3629 element_array += element_array_length_per_oq;
3630 element_array = PTR_ALIGN(element_array,
3631 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3634 ctrl_info->event_queue.oq_element_array = element_array;
3635 ctrl_info->event_queue.oq_element_array_bus_addr =
3636 ctrl_info->queue_memory_base_dma_handle +
3637 (element_array - ctrl_info->queue_memory_base);
3638 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3639 PQI_EVENT_OQ_ELEMENT_LENGTH;
3641 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3642 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3644 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3645 queue_group = &ctrl_info->queue_groups[i];
3646 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3647 queue_group->iq_ci_bus_addr[RAID_PATH] =
3648 ctrl_info->queue_memory_base_dma_handle +
3649 (next_queue_index -
3650 (void __iomem *)ctrl_info->queue_memory_base);
3651 next_queue_index += sizeof(pqi_index_t);
3652 next_queue_index = PTR_ALIGN(next_queue_index,
3653 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3654 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3655 queue_group->iq_ci_bus_addr[AIO_PATH] =
3656 ctrl_info->queue_memory_base_dma_handle +
3657 (next_queue_index -
3658 (void __iomem *)ctrl_info->queue_memory_base);
3659 next_queue_index += sizeof(pqi_index_t);
3660 next_queue_index = PTR_ALIGN(next_queue_index,
3661 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3662 queue_group->oq_pi = next_queue_index;
3663 queue_group->oq_pi_bus_addr =
3664 ctrl_info->queue_memory_base_dma_handle +
3665 (next_queue_index -
3666 (void __iomem *)ctrl_info->queue_memory_base);
3667 next_queue_index += sizeof(pqi_index_t);
3668 next_queue_index = PTR_ALIGN(next_queue_index,
3669 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3672 ctrl_info->event_queue.oq_pi = next_queue_index;
3673 ctrl_info->event_queue.oq_pi_bus_addr =
3674 ctrl_info->queue_memory_base_dma_handle +
3675 (next_queue_index -
3676 (void __iomem *)ctrl_info->queue_memory_base);
3678 return 0;
3679 }
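/*
 * Editor sketch (hypothetical helper): the sizing pass above walks a NULL
 * base pointer through identical align-then-advance steps, and the
 * carve-up pass repeats the same walk over the real allocation, so the
 * two always agree. Condensed:
 */
static inline size_t example_aligned_total(size_t count, size_t each,
	size_t align)
{
	void *p = NULL;
	size_t i;

	for (i = 0; i < count; i++) {
		p = PTR_ALIGN(p, align);
		p += each;
	}

	return (size_t)p + align;	/* headroom to align the real base */
}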
3681 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3684 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3685 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3688 * Initialize the backpointers to the controller structure in
3689 * each operational queue group structure.
3691 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3692 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3695 * Assign IDs to all operational queues. Note that the IDs
3696 * assigned to operational IQs are independent of the IDs
3697 * assigned to operational OQs.
3699 ctrl_info->event_queue.oq_id = next_oq_id++;
3700 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3701 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3702 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3703 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3707 * Assign MSI-X table entry indexes to all queues. Note that the
3708 * interrupt for the event queue is shared with the first queue group.
3710 ctrl_info->event_queue.int_msg_num = 0;
3711 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3712 ctrl_info->queue_groups[i].int_msg_num = i;
3714 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3715 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3716 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3717 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3718 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3722 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3724 size_t alloc_length;
3725 struct pqi_admin_queues_aligned *admin_queues_aligned;
3726 struct pqi_admin_queues *admin_queues;
3728 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3729 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3731 ctrl_info->admin_queue_memory_base =
3732 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3733 &ctrl_info->admin_queue_memory_base_dma_handle,
3734 GFP_KERNEL);
3736 if (!ctrl_info->admin_queue_memory_base)
3737 return -ENOMEM;
3739 ctrl_info->admin_queue_memory_length = alloc_length;
3741 admin_queues = &ctrl_info->admin_queues;
3742 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3743 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3744 admin_queues->iq_element_array =
3745 &admin_queues_aligned->iq_element_array;
3746 admin_queues->oq_element_array =
3747 &admin_queues_aligned->oq_element_array;
3748 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3749 admin_queues->oq_pi =
3750 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
3752 admin_queues->iq_element_array_bus_addr =
3753 ctrl_info->admin_queue_memory_base_dma_handle +
3754 (admin_queues->iq_element_array -
3755 ctrl_info->admin_queue_memory_base);
3756 admin_queues->oq_element_array_bus_addr =
3757 ctrl_info->admin_queue_memory_base_dma_handle +
3758 (admin_queues->oq_element_array -
3759 ctrl_info->admin_queue_memory_base);
3760 admin_queues->iq_ci_bus_addr =
3761 ctrl_info->admin_queue_memory_base_dma_handle +
3762 ((void *)admin_queues->iq_ci -
3763 ctrl_info->admin_queue_memory_base);
3764 admin_queues->oq_pi_bus_addr =
3765 ctrl_info->admin_queue_memory_base_dma_handle +
3766 ((void __iomem *)admin_queues->oq_pi -
3767 (void __iomem *)ctrl_info->admin_queue_memory_base);
3772 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
3773 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3775 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3777 struct pqi_device_registers __iomem *pqi_registers;
3778 struct pqi_admin_queues *admin_queues;
3779 unsigned long timeout;
3783 pqi_registers = ctrl_info->pqi_registers;
3784 admin_queues = &ctrl_info->admin_queues;
3786 writeq((u64)admin_queues->iq_element_array_bus_addr,
3787 &pqi_registers->admin_iq_element_array_addr);
3788 writeq((u64)admin_queues->oq_element_array_bus_addr,
3789 &pqi_registers->admin_oq_element_array_addr);
3790 writeq((u64)admin_queues->iq_ci_bus_addr,
3791 &pqi_registers->admin_iq_ci_addr);
3792 writeq((u64)admin_queues->oq_pi_bus_addr,
3793 &pqi_registers->admin_oq_pi_addr);
3795 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3796 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
3797 (admin_queues->int_msg_num << 16);
3798 writel(reg, &pqi_registers->admin_iq_num_elements);
3799 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3800 &pqi_registers->function_and_status_code);
3802 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3803 while (1) {
3804 status = readb(&pqi_registers->function_and_status_code);
3805 if (status == PQI_STATUS_IDLE)
3806 break;
3807 if (time_after(jiffies, timeout))
3808 return -ETIMEDOUT;
3809 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3810 }
3813 * The offset registers are not initialized to the correct
3814 * offsets until *after* the create admin queue pair command
3815 * completes successfully.
3817 admin_queues->iq_pi = ctrl_info->iomem_base +
3818 PQI_DEVICE_REGISTERS_OFFSET +
3819 readq(&pqi_registers->admin_iq_pi_offset);
3820 admin_queues->oq_ci = ctrl_info->iomem_base +
3821 PQI_DEVICE_REGISTERS_OFFSET +
3822 readq(&pqi_registers->admin_oq_ci_offset);
3827 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3828 struct pqi_general_admin_request *request)
3830 struct pqi_admin_queues *admin_queues;
3834 admin_queues = &ctrl_info->admin_queues;
3835 iq_pi = admin_queues->iq_pi_copy;
3837 next_element = admin_queues->iq_element_array +
3838 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3840 memcpy(next_element, request, sizeof(*request));
3842 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3843 admin_queues->iq_pi_copy = iq_pi;
3846 * This write notifies the controller that an IU is available to be
3849 writel(iq_pi, admin_queues->iq_pi);
3850 }
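/*
 * Editor sketch (hypothetical helper): the generic shape of the
 * copy-then-doorbell sequence used here and in pqi_start_io(). The
 * element body must be written before the producer index is published;
 * writel() to the MMIO doorbell orders the prior memcpy ahead of the
 * doorbell write.
 */
static inline void example_publish_element(void *slot, const void *iu,
	size_t iu_length, unsigned int new_pi, void __iomem *doorbell)
{
	memcpy(slot, iu, iu_length);	/* element body first... */
	writel(new_pi, doorbell);	/* ...then tell the controller */
}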
3852 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3854 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3855 struct pqi_general_admin_response *response)
3857 struct pqi_admin_queues *admin_queues;
3860 unsigned long timeout;
3862 admin_queues = &ctrl_info->admin_queues;
3863 oq_ci = admin_queues->oq_ci_copy;
3865 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
3868 oq_pi = readl(admin_queues->oq_pi);
3869 if (oq_pi != oq_ci)
3870 break;
3871 if (time_after(jiffies, timeout)) {
3872 dev_err(&ctrl_info->pci_dev->dev,
3873 "timed out waiting for admin response\n");
3876 if (!sis_is_firmware_running(ctrl_info))
3878 usleep_range(1000, 2000);
3881 memcpy(response, admin_queues->oq_element_array +
3882 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3884 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3885 admin_queues->oq_ci_copy = oq_ci;
3886 writel(oq_ci, admin_queues->oq_ci);
3891 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3892 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3893 struct pqi_io_request *io_request)
3895 struct pqi_io_request *next;
3900 unsigned long flags;
3901 unsigned int num_elements_needed;
3902 unsigned int num_elements_to_end_of_queue;
3904 struct pqi_iu_header *request;
3906 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3908 if (io_request) {
3909 io_request->queue_group = queue_group;
3910 list_add_tail(&io_request->request_list_entry,
3911 &queue_group->request_list[path]);
3912 }
3914 iq_pi = queue_group->iq_pi_copy[path];
3916 list_for_each_entry_safe(io_request, next,
3917 &queue_group->request_list[path], request_list_entry) {
3919 request = io_request->iu;
3921 iu_length = get_unaligned_le16(&request->iu_length) +
3922 PQI_REQUEST_HEADER_LENGTH;
3923 num_elements_needed =
3924 DIV_ROUND_UP(iu_length,
3925 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3927 iq_ci = readl(queue_group->iq_ci[path]);
3929 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3930 ctrl_info->num_elements_per_iq))
3931 break;
3933 put_unaligned_le16(queue_group->oq_id,
3934 &request->response_queue_id);
3936 next_element = queue_group->iq_element_array[path] +
3937 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3939 num_elements_to_end_of_queue =
3940 ctrl_info->num_elements_per_iq - iq_pi;
3942 if (num_elements_needed <= num_elements_to_end_of_queue) {
3943 memcpy(next_element, request, iu_length);
3944 } else {
3945 copy_count = num_elements_to_end_of_queue *
3946 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3947 memcpy(next_element, request, copy_count);
3948 memcpy(queue_group->iq_element_array[path],
3949 (u8 *)request + copy_count,
3950 iu_length - copy_count);
3951 }
3953 iq_pi = (iq_pi + num_elements_needed) %
3954 ctrl_info->num_elements_per_iq;
3956 list_del(&io_request->request_list_entry);
3957 }
3959 if (iq_pi != queue_group->iq_pi_copy[path]) {
3960 queue_group->iq_pi_copy[path] = iq_pi;
3962 * This write notifies the controller that one or more IUs are
3963 * available to be processed.
3965 writel(iq_pi, queue_group->iq_pi[path]);
3966 }
3968 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3969 }
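/*
 * Editor sketch (hypothetical helper): the spanning copy above in
 * isolation. When an IU needs more bytes than remain before the end of
 * the element array, its tail wraps around to element 0.
 */
static inline void example_ring_copy(u8 *ring, unsigned int ring_bytes,
	unsigned int offset, const u8 *iu, unsigned int iu_length)
{
	unsigned int bytes_to_end = ring_bytes - offset;

	if (iu_length <= bytes_to_end) {
		memcpy(ring + offset, iu, iu_length);
	} else {
		memcpy(ring + offset, iu, bytes_to_end);
		memcpy(ring, iu + bytes_to_end, iu_length - bytes_to_end);
	}
}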
3971 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3973 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3974 struct completion *wait)
3975 {
3976 int rc;
3978 while (1) {
3979 if (wait_for_completion_io_timeout(wait,
3980 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
3981 rc = 0;
3982 break;
3983 }
3985 pqi_check_ctrl_health(ctrl_info);
3986 if (pqi_ctrl_offline(ctrl_info)) {
3987 rc = -ENXIO;
3988 break;
3989 }
3990 }
3992 return rc;
3993 }
3995 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3998 struct completion *waiting = context;
4000 complete(waiting);
4001 }
4003 static int pqi_process_raid_io_error_synchronous(
4004 struct pqi_raid_error_info *error_info)
4005 {
4006 int rc = -EIO;
4008 switch (error_info->data_out_result) {
4009 case PQI_DATA_IN_OUT_GOOD:
4010 if (error_info->status == SAM_STAT_GOOD)
4011 rc = 0;
4012 break;
4013 case PQI_DATA_IN_OUT_UNDERFLOW:
4014 if (error_info->status == SAM_STAT_GOOD ||
4015 error_info->status == SAM_STAT_CHECK_CONDITION)
4016 rc = 0;
4017 break;
4018 case PQI_DATA_IN_OUT_ABORTED:
4019 rc = PQI_CMD_STATUS_ABORTED;
4020 break;
4021 }
4023 return rc;
4024 }
4026 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4027 struct pqi_iu_header *request, unsigned int flags,
4028 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
4031 struct pqi_io_request *io_request;
4032 unsigned long start_jiffies;
4033 unsigned long msecs_blocked;
4035 DECLARE_COMPLETION_ONSTACK(wait);
4038 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
4039 * are mutually exclusive.
4042 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4043 if (down_interruptible(&ctrl_info->sync_request_sem))
4044 return -ERESTARTSYS;
4045 } else {
4046 if (timeout_msecs == NO_TIMEOUT) {
4047 down(&ctrl_info->sync_request_sem);
4048 } else {
4049 start_jiffies = jiffies;
4050 if (down_timeout(&ctrl_info->sync_request_sem,
4051 msecs_to_jiffies(timeout_msecs)))
4052 return -ETIMEDOUT;
4053 msecs_blocked =
4054 jiffies_to_msecs(jiffies - start_jiffies);
4055 if (msecs_blocked >= timeout_msecs) {
4056 rc = -ETIMEDOUT;
4057 goto out;
4058 }
4059 timeout_msecs -= msecs_blocked;
4060 }
4061 }
4063 pqi_ctrl_busy(ctrl_info);
4064 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4065 if (timeout_msecs == 0) {
4066 pqi_ctrl_unbusy(ctrl_info);
4067 rc = -ETIMEDOUT;
4068 goto out;
4069 }
4071 if (pqi_ctrl_offline(ctrl_info)) {
4072 pqi_ctrl_unbusy(ctrl_info);
4073 rc = -ENXIO;
4074 goto out;
4075 }
4077 atomic_inc(&ctrl_info->sync_cmds_outstanding);
4079 io_request = pqi_alloc_io_request(ctrl_info);
4081 put_unaligned_le16(io_request->index,
4082 &(((struct pqi_raid_path_request *)request)->request_id));
4084 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4085 ((struct pqi_raid_path_request *)request)->error_index =
4086 ((struct pqi_raid_path_request *)request)->request_id;
4088 iu_length = get_unaligned_le16(&request->iu_length) +
4089 PQI_REQUEST_HEADER_LENGTH;
4090 memcpy(io_request->iu, request, iu_length);
4092 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4093 io_request->context = &wait;
4095 pqi_start_io(ctrl_info,
4096 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4099 pqi_ctrl_unbusy(ctrl_info);
4101 if (timeout_msecs == NO_TIMEOUT) {
4102 pqi_wait_for_completion_io(ctrl_info, &wait);
4103 } else {
4104 if (!wait_for_completion_io_timeout(&wait,
4105 msecs_to_jiffies(timeout_msecs))) {
4106 dev_warn(&ctrl_info->pci_dev->dev,
4107 "command timed out\n");
4108 rc = -ETIMEDOUT;
4109 }
4110 }
4112 if (error_info) {
4113 if (io_request->error_info)
4114 memcpy(error_info, io_request->error_info,
4115 sizeof(*error_info));
4116 else
4117 memset(error_info, 0, sizeof(*error_info));
4118 } else if (rc == 0 && io_request->error_info) {
4119 rc = pqi_process_raid_io_error_synchronous(
4120 io_request->error_info);
4123 pqi_free_io_request(io_request);
4125 atomic_dec(&ctrl_info->sync_cmds_outstanding);
4127 up(&ctrl_info->sync_request_sem);
4129 return rc;
4130 }
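/*
 * Editor sketch (hypothetical helper): the function above keeps a
 * shrinking millisecond budget across its blocking points (semaphore,
 * controller-blocked wait, completion wait). The recurring step is:
 */
static inline unsigned long example_remaining_budget(
	unsigned long budget_msecs, unsigned long start_jiffies)
{
	unsigned long elapsed_msecs = jiffies_to_msecs(jiffies - start_jiffies);

	return elapsed_msecs >= budget_msecs ? 0 : budget_msecs - elapsed_msecs;
}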
4132 static int pqi_validate_admin_response(
4133 struct pqi_general_admin_response *response, u8 expected_function_code)
4135 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4136 return -EINVAL;
4138 if (get_unaligned_le16(&response->header.iu_length) !=
4139 PQI_GENERAL_ADMIN_IU_LENGTH)
4140 return -EINVAL;
4142 if (response->function_code != expected_function_code)
4143 return -EINVAL;
4145 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4146 return -EINVAL;
4148 return 0;
4149 }
4151 static int pqi_submit_admin_request_synchronous(
4152 struct pqi_ctrl_info *ctrl_info,
4153 struct pqi_general_admin_request *request,
4154 struct pqi_general_admin_response *response)
4158 pqi_submit_admin_request(ctrl_info, request);
4160 rc = pqi_poll_for_admin_response(ctrl_info, response);
4163 rc = pqi_validate_admin_response(response,
4164 request->function_code);
4169 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4172 struct pqi_general_admin_request request;
4173 struct pqi_general_admin_response response;
4174 struct pqi_device_capability *capability;
4175 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4177 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4178 if (!capability)
4179 return -ENOMEM;
4181 memset(&request, 0, sizeof(request));
4183 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4184 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4185 &request.header.iu_length);
4186 request.function_code =
4187 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4188 put_unaligned_le32(sizeof(*capability),
4189 &request.data.report_device_capability.buffer_length);
4191 rc = pqi_map_single(ctrl_info->pci_dev,
4192 &request.data.report_device_capability.sg_descriptor,
4193 capability, sizeof(*capability),
4194 DMA_FROM_DEVICE);
4195 if (rc)
4196 goto out;
4198 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4199 &response);
4201 pqi_pci_unmap(ctrl_info->pci_dev,
4202 &request.data.report_device_capability.sg_descriptor, 1,
4203 DMA_FROM_DEVICE);
4205 if (rc)
4206 goto out;
4208 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4209 rc = -EIO;
4210 goto out;
4211 }
4213 ctrl_info->max_inbound_queues =
4214 get_unaligned_le16(&capability->max_inbound_queues);
4215 ctrl_info->max_elements_per_iq =
4216 get_unaligned_le16(&capability->max_elements_per_iq);
4217 ctrl_info->max_iq_element_length =
4218 get_unaligned_le16(&capability->max_iq_element_length)
4219 * 16;
4220 ctrl_info->max_outbound_queues =
4221 get_unaligned_le16(&capability->max_outbound_queues);
4222 ctrl_info->max_elements_per_oq =
4223 get_unaligned_le16(&capability->max_elements_per_oq);
4224 ctrl_info->max_oq_element_length =
4225 get_unaligned_le16(&capability->max_oq_element_length)
4226 * 16;
4228 sop_iu_layer_descriptor =
4229 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4231 ctrl_info->max_inbound_iu_length_per_firmware =
4232 get_unaligned_le16(
4233 &sop_iu_layer_descriptor->max_inbound_iu_length);
4234 ctrl_info->inbound_spanning_supported =
4235 sop_iu_layer_descriptor->inbound_spanning_supported;
4236 ctrl_info->outbound_spanning_supported =
4237 sop_iu_layer_descriptor->outbound_spanning_supported;
4239 out:
4240 kfree(capability);
4242 return rc;
4243 }
4245 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4247 if (ctrl_info->max_iq_element_length <
4248 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4249 dev_err(&ctrl_info->pci_dev->dev,
4250 "max. inbound queue element length of %d is less than the required length of %d\n",
4251 ctrl_info->max_iq_element_length,
4252 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4253 return -EINVAL;
4254 }
4256 if (ctrl_info->max_oq_element_length <
4257 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4258 dev_err(&ctrl_info->pci_dev->dev,
4259 "max. outbound queue element length of %d is less than the required length of %d\n",
4260 ctrl_info->max_oq_element_length,
4261 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4262 return -EINVAL;
4263 }
4265 if (ctrl_info->max_inbound_iu_length_per_firmware <
4266 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4267 dev_err(&ctrl_info->pci_dev->dev,
4268 "max. inbound IU length of %u is less than the min. required length of %d\n",
4269 ctrl_info->max_inbound_iu_length_per_firmware,
4270 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4271 return -EINVAL;
4272 }
4274 if (!ctrl_info->inbound_spanning_supported) {
4275 dev_err(&ctrl_info->pci_dev->dev,
4276 "the controller does not support inbound spanning\n");
4280 if (ctrl_info->outbound_spanning_supported) {
4281 dev_err(&ctrl_info->pci_dev->dev,
4282 "the controller supports outbound spanning but this driver does not\n");
4289 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4292 struct pqi_event_queue *event_queue;
4293 struct pqi_general_admin_request request;
4294 struct pqi_general_admin_response response;
4296 event_queue = &ctrl_info->event_queue;
4299 * Create OQ (Outbound Queue - device to host queue) to dedicate
4302 memset(&request, 0, sizeof(request));
4303 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4304 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4305 &request.header.iu_length);
4306 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4307 put_unaligned_le16(event_queue->oq_id,
4308 &request.data.create_operational_oq.queue_id);
4309 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4310 &request.data.create_operational_oq.element_array_addr);
4311 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4312 &request.data.create_operational_oq.pi_addr);
4313 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4314 &request.data.create_operational_oq.num_elements);
4315 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4316 &request.data.create_operational_oq.element_length);
4317 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4318 put_unaligned_le16(event_queue->int_msg_num,
4319 &request.data.create_operational_oq.int_msg_num);
4321 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4322 &response);
4323 if (rc)
4324 return rc;
4326 event_queue->oq_ci = ctrl_info->iomem_base +
4327 PQI_DEVICE_REGISTERS_OFFSET +
4328 readq(
4329 &response.data.create_operational_oq.oq_ci_offset);
4331 return 0;
4332 }
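/*
 * Editor sketch (hypothetical helper): this function and
 * pqi_create_queue_group() below build every CREATE_IQ/CREATE_OQ/
 * CHANGE_IQ_PROPERTY request with the same preamble, which could be
 * factored as:
 */
static inline void example_init_admin_request(
	struct pqi_general_admin_request *request, u8 function_code)
{
	memset(request, 0, sizeof(*request));
	request->header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request->header.iu_length);
	request->function_code = function_code;
}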
4334 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4335 unsigned int group_number)
4338 struct pqi_queue_group *queue_group;
4339 struct pqi_general_admin_request request;
4340 struct pqi_general_admin_response response;
4342 queue_group = &ctrl_info->queue_groups[group_number];
4345 * Create IQ (Inbound Queue - host to device queue) for
4348 memset(&request, 0, sizeof(request));
4349 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4350 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4351 &request.header.iu_length);
4352 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4353 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4354 &request.data.create_operational_iq.queue_id);
4355 put_unaligned_le64(
4356 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4357 &request.data.create_operational_iq.element_array_addr);
4358 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4359 &request.data.create_operational_iq.ci_addr);
4360 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4361 &request.data.create_operational_iq.num_elements);
4362 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4363 &request.data.create_operational_iq.element_length);
4364 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4366 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4367 &response);
4368 if (rc) {
4369 dev_err(&ctrl_info->pci_dev->dev,
4370 "error creating inbound RAID queue\n");
4374 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4375 PQI_DEVICE_REGISTERS_OFFSET +
4376 readq(
4377 &response.data.create_operational_iq.iq_pi_offset);
4380 * Create IQ (Inbound Queue - host to device queue) for
4381 * Advanced I/O (AIO) path.
4383 memset(&request, 0, sizeof(request));
4384 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4385 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4386 &request.header.iu_length);
4387 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4388 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4389 &request.data.create_operational_iq.queue_id);
4390 put_unaligned_le64((u64)queue_group->
4391 iq_element_array_bus_addr[AIO_PATH],
4392 &request.data.create_operational_iq.element_array_addr);
4393 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4394 &request.data.create_operational_iq.ci_addr);
4395 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4396 &request.data.create_operational_iq.num_elements);
4397 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4398 &request.data.create_operational_iq.element_length);
4399 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		return rc;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path. By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
4419 memset(&request, 0, sizeof(request));
4420 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4421 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4422 &request.header.iu_length);
4423 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4424 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4425 &request.data.change_operational_iq_properties.queue_id);
4426 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4427 &request.data.change_operational_iq_properties.vendor_specific);
	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		return rc;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
4440 memset(&request, 0, sizeof(request));
4441 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4442 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4443 &request.header.iu_length);
4444 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4445 put_unaligned_le16(queue_group->oq_id,
4446 &request.data.create_operational_oq.queue_id);
4447 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4448 &request.data.create_operational_oq.element_array_addr);
4449 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4450 &request.data.create_operational_oq.pi_addr);
4451 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4452 &request.data.create_operational_oq.num_elements);
4453 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4454 &request.data.create_operational_oq.element_length);
4455 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4456 put_unaligned_le16(queue_group->int_msg_num,
4457 &request.data.create_operational_oq.int_msg_num);
	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		return rc;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}
static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info, i);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}
4500 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4501 (offsetof(struct pqi_event_config, descriptors) + \
4502 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
	bool enable_events)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_event_descriptor *event_descriptor;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;
4518 memset(&request, 0, sizeof(request));
4520 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4521 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4522 data.report_event_configuration.sg_descriptors[1]) -
4523 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4524 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4525 &request.data.report_event_configuration.buffer_length);
	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;
	for (i = 0; i < event_config->num_event_descriptors; i++) {
		event_descriptor = &event_config->descriptors[i];
		if (enable_events &&
			pqi_is_supported_event(event_descriptor->event_type))
			put_unaligned_le16(ctrl_info->event_queue.oq_id,
				&event_descriptor->oq_id);
		else
			put_unaligned_le16(0, &event_descriptor->oq_id);
	}
4554 memset(&request, 0, sizeof(request));
4556 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4557 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4558 data.report_event_configuration.sg_descriptors[1]) -
4559 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4560 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4561 &request.data.report_event_configuration.buffer_length);
	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_TO_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_TO_DEVICE);

out:
	kfree(event_config);

	return rc;
}
static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, true);
}

static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, false);
}
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
		ctrl_info->error_buffer_length,
		&ctrl_info->error_buffer_dma_handle,
		GFP_KERNEL);
	if (!ctrl_info->error_buffer)
		return -ENOMEM;

	return 0;
}
static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool =
		kcalloc(ctrl_info->max_io_slots,
			sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu =
			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);
		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle =
			sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}
/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */
static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	if (reset_devices)
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE_KDUMP);
	else
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE);

	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;

	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);

	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;

	ctrl_info->sg_chain_buffer_length =
		(max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
		PQI_EXTRA_SGL_MEMORY;
	ctrl_info->sg_tablesize = max_sg_entries;
	ctrl_info->max_sectors = max_transfer_size / 512;
}
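/*
 * Illustrative sizing walk-through (assumed numbers, not read from any
 * controller): with 4 KiB pages and a max_transfer_size of 1 MiB,
 * max_sg_entries starts at 1 MiB / 4 KiB = 256, becomes 257 after the
 * alignment "+1", and max_transfer_size is then rounded back down to
 * (257 - 1) * 4 KiB = 1 MiB, giving max_sectors = 2048.
 */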
static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	if (reset_devices) {
		num_queue_groups = 1;
	} else {
		int num_cpus;
		int max_queue_groups;

		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
			ctrl_info->max_outbound_queues - 1);
		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

		num_cpus = num_online_cpus();
		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
		num_queue_groups = min(num_queue_groups, max_queue_groups);
	}
4753 ctrl_info->num_queue_groups = num_queue_groups;
4754 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
4760 ctrl_info->max_inbound_iu_length =
4761 (ctrl_info->max_inbound_iu_length_per_firmware /
4762 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4763 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4765 num_elements_per_iq =
4766 (ctrl_info->max_inbound_iu_length /
4767 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4769 /* Add one because one element in each queue is unusable. */
4770 num_elements_per_iq++;
4772 num_elements_per_iq = min(num_elements_per_iq,
4773 ctrl_info->max_elements_per_iq);
4775 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4776 num_elements_per_oq = min(num_elements_per_oq,
4777 ctrl_info->max_elements_per_oq);
4779 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4780 ctrl_info->num_elements_per_oq = num_elements_per_oq;
	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
}
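/*
 * Worked example (assumed firmware limits, for illustration only): if the
 * firmware reports max_inbound_iu_length_per_firmware = 1064 and the IQ
 * element length is 128 bytes, max_inbound_iu_length is rounded down to
 * 8 * 128 = 1024, num_elements_per_iq becomes 1024 / 128 + 1 = 9, and
 * num_elements_per_oq is (9 - 1) * 2 + 1 = 17, with both values still
 * subject to the firmware's per-queue caps.
 */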
static inline void pqi_set_sg_descriptor(
	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
{
	u64 address = (u64)sg_dma_address(sg);
	unsigned int length = sg_dma_len(sg);

	put_unaligned_le64(address, &sg_descriptor->address);
	put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}
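/*
 * Side note (an added observation, not new behavior): struct
 * pqi_sg_descriptor is a 16-byte entry (8-byte address, 4-byte length,
 * 4-byte flags), which is also why the CREATE_IQ/CREATE_OQ admin requests
 * above express their element_length fields in units of 16 bytes.
 */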
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}
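/*
 * Worked example of the chaining scheme above (illustrative numbers,
 * assuming ctrl_info->max_sg_per_iu == 17, so the local max_sg_per_iu is
 * 16): for a 40-element scatterlist, the first 16 descriptors are
 * embedded in the IU, the next embedded slot is rewritten as a
 * CISS_SG_CHAIN descriptor whose address points at the DMA-able
 * sg_chain_buffer and whose length covers the remaining
 * (40 - 16) * 16 bytes, and the last 24 entries are written into the
 * chain buffer; CISS_SG_LAST marks the final descriptor.
 */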
static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}
static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}

static int pqi_raid_submit_scsi_cmd_with_io_request(
	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_raid_path_request *request;

	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));
4956 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4957 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4958 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4959 put_unaligned_le16(io_request->index, &request->request_id);
4960 request->error_index = request->request_id;
4961 memcpy(request->lun_number, device->scsi3addr,
4962 sizeof(request->lun_number));
4964 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4965 memcpy(request->cdb, scmd->cmnd, cdb_length);
	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		/* No bytes in the Additional CDB bytes field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		/* 4 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		/* 8 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		/* 12 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		/* 16 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}
	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}
static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct pqi_io_request *io_request;

	io_request = pqi_alloc_io_request(ctrl_info);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}

static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		schedule_work(&ctrl_info->raid_bypass_retry_work);
}
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	if (!io_request->raid_bypass)
		return false;

	scmd = io_request->scmd;
	if ((scmd->result & 0xff) == SAM_STAT_GOOD)
		return false;
	if (host_byte(scmd->result) == DID_NO_CONNECT)
		return false;

	device = scmd->device->hostdata;
	if (pqi_device_offline(device))
		return false;

	ctrl_info = shost_to_hba(scmd->device->host);
	if (pqi_ctrl_offline(ctrl_info))
		return false;

	return true;
}
static inline void pqi_add_to_raid_bypass_retry_list(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_io_request *io_request, bool at_head)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	if (at_head)
		list_add(&io_request->request_list_entry,
			&ctrl_info->raid_bypass_retry_list);
	else
		list_add_tail(&io_request->request_list_entry,
			&ctrl_info->raid_bypass_retry_list);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
}
static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}

static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_ctrl_info *ctrl_info;

	io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
	scmd = io_request->scmd;
	scmd->result = 0;
	ctrl_info = shost_to_hba(scmd->device->host);

	pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
	pqi_schedule_bypass_retry(ctrl_info);
}
static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;

	scmd = io_request->scmd;
	device = scmd->device->hostdata;
	if (pqi_device_in_reset(device)) {
		pqi_free_io_request(io_request);
		set_host_byte(scmd, DID_RESET);
		pqi_scsi_done(scmd);
		return 0;
	}

	ctrl_info = shost_to_hba(scmd->device->host);
	queue_group = io_request->queue_group;

	pqi_reinit_io_request(io_request);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}
static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_io_request *io_request;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	io_request = list_first_entry_or_null(
		&ctrl_info->raid_bypass_retry_list,
		struct pqi_io_request, request_list_entry);
	if (io_request)
		list_del(&io_request->request_list_entry);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);

	return io_request;
}
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_io_request *io_request;

	pqi_ctrl_busy(ctrl_info);

	while (1) {
		if (pqi_ctrl_blocked(ctrl_info))
			break;
		io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
		if (!io_request)
			break;
		rc = pqi_retry_raid_bypass(io_request);
		if (rc) {
			pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
				true);
			pqi_schedule_bypass_retry(ctrl_info);
			break;
		}
	}

	pqi_ctrl_unbusy(ctrl_info);
}
static void pqi_raid_bypass_retry_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info,
		raid_bypass_retry_work);
	pqi_retry_raid_bypass_requests(ctrl_info);
}

static void pqi_clear_all_queued_raid_bypass_retries(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
}
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN)
		set_host_byte(scmd, DID_IMM_RETRY);
	else if (pqi_raid_bypass_retry_needed(io_request)) {
		pqi_queue_raid_bypass_retry(io_request);
		return;
	}
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}

static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;
	io_request->raid_bypass = raid_bypass;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));
5243 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5244 put_unaligned_le32(aio_handle, &request->nexus_id);
5245 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5246 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5247 put_unaligned_le16(io_request->index, &request->request_id);
5248 request->error_index = request->request_id;
5249 if (cdb_length > sizeof(request->cdb))
5250 cdb_length = sizeof(request->cdb);
5251 request->cdb_length = cdb_length;
5252 memcpy(request->cdb, cdb, cdb_length);
	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}
5274 if (encryption_info) {
5275 request->encryption_enable = true;
5276 put_unaligned_le16(encryption_info->data_encryption_key_index,
5277 &request->data_encryption_key_index);
5278 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5279 &request->encrypt_tweak_lower);
5280 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5281 &request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}
static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
	u16 hw_queue;

	hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
	if (hw_queue > ctrl_info->max_hw_queue_index)
		hw_queue = 0;

	return hw_queue;
}
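/*
 * Note (explanatory, not from the original source): because the host
 * template below sets host_tagset and wires up pqi_map_queues(), the
 * hardware-queue index recovered from the unique tag normally selects the
 * matching per-CPU queue group directly; the clamp to 0 is only a safety
 * net for an out-of-range index.
 */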
/*
 * This function gets called just before we hand the completed SCSI request
 * back to the SML.
 */
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
	struct pqi_scsi_dev *device;

	if (!scmd->device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		return;
	}

	device = scmd->device->hostdata;
	if (!device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		return;
	}

	atomic_dec(&device->scsi_cmds_outstanding);
}
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hw_queue;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;

	device = scmd->device->hostdata;
	ctrl_info = shost_to_hba(shost);

	if (!device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	atomic_inc(&device->scsi_cmds_outstanding);

	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
		device)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	pqi_ctrl_busy(ctrl_info);
	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
	    pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	scmd->result = 0;

	hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
	queue_group = &ctrl_info->queue_groups[hw_queue];

	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		if (device->raid_bypass_enabled &&
			!blk_rq_is_passthrough(scmd->request)) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
				scmd, queue_group);
			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
				raid_bypassed = true;
				atomic_inc(&device->raid_bypass_cnt);
			}
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
	} else {
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
	}

out:
	pqi_ctrl_unbusy(ctrl_info);
	if (rc)
		atomic_dec(&device->scsi_cmds_outstanding);

	return rc;
}
static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	unsigned int path;
	unsigned long flags;
	bool list_is_empty;

	for (path = 0; path < 2; path++) {
		while (1) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);
			list_is_empty =
				list_empty(&queue_group->request_list[path]);
			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
			if (list_is_empty)
				break;
			pqi_check_ctrl_health(ctrl_info);
			if (pqi_ctrl_offline(ctrl_info))
				return -ENXIO;
			usleep_range(1000, 2000);
		}
	}

	return 0;
}
static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
		if (rc)
			return rc;

		for (path = 0; path < 2; path++) {
			iq_pi = queue_group->iq_pi_copy[path];

			while (1) {
				iq_ci = readl(queue_group->iq_ci[path]);
				if (iq_ci == iq_pi)
					break;
				pqi_check_ctrl_health(ctrl_info);
				if (pqi_ctrl_offline(ctrl_info))
					return -ENXIO;
				usleep_range(1000, 2000);
			}
		}
	}

	return 0;
}
static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *scsi_device;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {
				scmd = io_request->scmd;
				if (!scmd)
					continue;

				scsi_device = scmd->device->hostdata;
				if (scsi_device != device)
					continue;

				list_del(&io_request->request_list_entry);
				set_host_byte(scmd, DID_RESET);
				pqi_free_io_request(io_request);
				scsi_dma_unmap(scmd);
				pqi_scsi_done(scmd);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}
static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(&queue_group->submit_lock[path],
						flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {

				scmd = io_request->scmd;
				if (!scmd)
					continue;

				list_del(&io_request->request_list_entry);
				set_host_byte(scmd, DID_RESET);
				pqi_free_io_request(io_request);
				scsi_dma_unmap(scmd);
				pqi_scsi_done(scmd);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs)
{
	unsigned long timeout;

	timeout = (timeout_secs * PQI_HZ) + jiffies;

	while (atomic_read(&device->scsi_cmds_outstanding)) {
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		if (timeout_secs != NO_TIMEOUT) {
			if (time_after(jiffies, timeout)) {
				dev_err(&ctrl_info->pci_dev->dev,
					"timed out waiting for pending IO\n");
				return -ETIMEDOUT;
			}
		}
		usleep_range(1000, 2000);
	}

	return 0;
}
static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_secs)
{
	bool io_pending;
	unsigned long flags;
	unsigned long timeout;
	struct pqi_scsi_dev *device;

	timeout = (timeout_secs * PQI_HZ) + jiffies;
	while (1) {
		io_pending = false;

		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
		list_for_each_entry(device, &ctrl_info->scsi_device_list,
			scsi_device_list_entry) {
			if (atomic_read(&device->scsi_cmds_outstanding)) {
				io_pending = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
					flags);

		if (!io_pending)
			break;

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;

		if (timeout_secs != NO_TIMEOUT) {
			if (time_after(jiffies, timeout)) {
				dev_err(&ctrl_info->pci_dev->dev,
					"timed out waiting for pending IO\n");
				return -ETIMEDOUT;
			}
		}
		usleep_range(1000, 2000);
	}

	return 0;
}
static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		usleep_range(1000, 2000);
	}

	return 0;
}

static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}
5631 #define PQI_LUN_RESET_TIMEOUT_SECS 30
5632 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct completion *wait)
{
	int rc;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
	}

	return rc;
}

static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct pqi_task_management_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_lun_reset_complete;
	io_request->context = &wait;

	request = io_request->iu;
	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(io_request->index, &request->request_id);
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));
	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
	if (ctrl_info->tmf_iu_timeout_supported)
		put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
			&request->timeout);

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);

	return rc;
}
5695 /* Performs a reset at the LUN level. */
5697 #define PQI_LUN_RESET_RETRIES 3
5698 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
5699 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	unsigned int retries;
	unsigned long timeout_secs;

	for (retries = 0;;) {
		rc = pqi_lun_reset(ctrl_info, device);
		if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
			break;
		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
	}

	timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;

	rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);

	return rc == 0 ? SUCCESS : FAILED;
}
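/*
 * Rough worst-case budget implied by the constants above (explanatory
 * arithmetic only, not from the original source): up to
 * 1 + PQI_LUN_RESET_RETRIES reset attempts separated by 10-second sleeps,
 * followed by up to 120 seconds waiting for pending I/O, so an
 * unresponsive LUN can occupy the reset path for several minutes.
 */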
static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	mutex_lock(&ctrl_info->lun_reset_mutex);

	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_io_queued_for_device(ctrl_info, device);
	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_device_reset_start(device);
	pqi_ctrl_unblock_requests(ctrl_info);

	if (rc)
		rc = FAILED;
	else
		rc = _pqi_device_reset(ctrl_info, device);

	pqi_device_reset_done(device);

	mutex_unlock(&ctrl_info->lun_reset_mutex);

	return rc;
}
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	int rc;
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	shost = scmd->device->host;
	ctrl_info = shost_to_hba(shost);
	device = scmd->device->hostdata;

	dev_err(&ctrl_info->pci_dev->dev,
		"resetting scsi %d:%d:%d:%d\n",
		shost->host_no, device->bus, device->target, device->lun);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info) ||
		pqi_device_reset_blocked(ctrl_info)) {
		rc = FAILED;
		goto out;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	atomic_inc(&ctrl_info->sync_cmds_outstanding);
	rc = pqi_device_reset(ctrl_info, device);
	atomic_dec(&ctrl_info->sync_cmds_outstanding);

out:
	dev_err(&ctrl_info->pci_dev->dev,
		"reset of scsi %d:%d:%d:%d: %s\n",
		shost->host_no, device->bus, device->target, device->lun,
		rc == SUCCESS ? "SUCCESS" : "FAILED");

	return rc;
}
static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
	unsigned long flags;
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_target *starget;
	struct sas_rphy *rphy;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			device->target = sdev_id(sdev);
			device->lun = sdev->lun;
			device->target_lun_valid = true;
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device) {
		sdev->hostdata = device;
		device->sdev = sdev;
		if (device->queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(sdev,
				device->advertised_queue_depth);
		}
		if (pqi_is_logical_device(device))
			pqi_disable_write_same(sdev);
		else
			sdev->allow_restart = 1;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}
static int pqi_map_queues(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
					ctrl_info->pci_dev, 0);
}

static int pqi_slave_configure(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	device->devtype = sdev->type;

	return 0;
}
static void pqi_slave_destroy(struct scsi_device *sdev)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (device) {
		sdev->hostdata = NULL;
		if (!list_empty(&device->scsi_device_list_entry))
			list_del(&device->scsi_device_list_entry);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (device) {
		pqi_dev_info(ctrl_info, "removed", device);
		pqi_free_device(device);
	}
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	struct pci_dev *pci_dev;
	u32 subsystem_vendor;
	u32 subsystem_device;
	cciss_pci_info_struct pciinfo;

	if (!arg)
		return -EINVAL;

	pci_dev = ctrl_info->pci_dev;

	pciinfo.domain = pci_domain_nr(pci_dev->bus);
	pciinfo.bus = pci_dev->bus->number;
	pciinfo.dev_fn = pci_dev->devfn;
	subsystem_vendor = pci_dev->subsystem_vendor;
	subsystem_device = pci_dev->subsystem_device;
	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;

	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;

	return 0;
}
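/*
 * Illustrative packing example for board_id above (hypothetical IDs, not
 * read from real hardware): a subsystem device ID of 0x1100 and subsystem
 * vendor ID of 0x9005 yield board_id 0x11009005 - device ID in the upper
 * 16 bits, vendor ID in the lower 16.
 */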
static int pqi_getdrivver_ioctl(void __user *arg)
{
	u32 version;

	if (!arg)
		return -EINVAL;

	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
		(DRIVER_RELEASE << 16) | DRIVER_REVISION;

	if (copy_to_user(arg, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}
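/*
 * Worked example of the version packing above: with DRIVER_MAJOR 1,
 * DRIVER_MINOR 2, DRIVER_RELEASE 16 and DRIVER_REVISION 10, the ioctl
 * reports (1 << 28) | (2 << 24) | (16 << 16) | 10 = 0x1210000A.
 */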
struct ciss_error_info {
	u8 scsi_status;
	int command_status;
	size_t sense_data_length;
};
static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{
	int ciss_cmd_status;
	size_t sense_data_length;

	switch (pqi_error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
		break;
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
		break;
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
		break;
	default:
		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
		break;
	}

	sense_data_length =
		get_unaligned_le16(&pqi_error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&pqi_error_info->response_data_length);
	if (sense_data_length)
		if (sense_data_length > sizeof(pqi_error_info->data))
			sense_data_length = sizeof(pqi_error_info->data);

	ciss_error_info->scsi_status = pqi_error_info->status;
	ciss_error_info->command_status = ciss_cmd_status;
	ciss_error_info->sense_data_length = sense_data_length;
}
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
	IOCTL_Command_struct iocommand;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info pqi_error_info;
	struct ciss_error_info ciss_error_info;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
	case XFER_READ | XFER_WRITE:
		break;
	default:
		return -EINVAL;
	}

	if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}
6035 memset(&request, 0, sizeof(request));
6037 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6038 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6039 PQI_REQUEST_HEADER_LENGTH;
6040 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6041 sizeof(request.lun_number));
6042 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6043 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	case XFER_READ | XFER_WRITE:
		request.data_direction = SOP_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	if (iocommand.buf_size > 0) {
		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}
6074 put_unaligned_le16(iu_length, &request.header.iu_length);
6076 if (ctrl_info->raid_iu_timeout_supported)
6077 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6079 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6080 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
			DMA_BIDIRECTIONAL);

	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
	if (rc == 0) {
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
		iocommand.error_info.CommandStatus =
			ciss_error_info.command_status;
		sense_data_length = ciss_error_info.sense_data_length;
		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}
	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}

	if (rc == 0 && iocommand.buf_size > 0 &&
		(iocommand.Request.Type.Direction & XFER_READ)) {
		if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
		     void __user *arg)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
		return -EBUSY;

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		rc = pqi_scan_scsi_devices(ctrl_info);
		break;
	case CCISS_GETPCIINFO:
		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
		break;
	case CCISS_GETDRIVVER:
		rc = pqi_getdrivver_ioctl(arg);
		break;
	case CCISS_PASSTHRU:
		rc = pqi_passthru_ioctl(ctrl_info, arg);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
static ssize_t pqi_firmware_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
}

static ssize_t pqi_driver_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	return snprintf(buffer, PAGE_SIZE, "%s\n",
		DRIVER_VERSION BUILD_TIMESTAMP);
}
static ssize_t pqi_serial_number_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
}

static ssize_t pqi_model_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
}

static ssize_t pqi_vendor_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
}
static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	pqi_scan_start(shost);

	return count;
}

static ssize_t pqi_lockup_action_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	int count = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (pqi_lockup_actions[i].action == pqi_lockup_action)
			count += scnprintf(buffer + count, PAGE_SIZE - count,
				"[%s] ", pqi_lockup_actions[i].name);
		else
			count += scnprintf(buffer + count, PAGE_SIZE - count,
				"%s ", pqi_lockup_actions[i].name);
	}

	count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");

	return count;
}
static ssize_t pqi_lockup_action_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	unsigned int i;
	char *action_name;
	char action_name_buffer[32];

	strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
	action_name = strstrip(action_name_buffer);

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return count;
		}
	}

	return -EINVAL;
}
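/*
 * Usage sketch (illustrative shell session; the host number and the exact
 * action names come from pqi_lockup_actions[], so treat these as
 * assumptions rather than guaranteed values):
 *
 *   cat /sys/class/scsi_host/host0/lockup_action
 *   echo reboot > /sys/class/scsi_host/host0/lockup_action
 *
 * Reading the attribute brackets the currently selected action.
 */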
6263 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6264 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6265 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6266 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6267 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6268 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6269 static DEVICE_ATTR(lockup_action, 0644,
6270 pqi_lockup_action_show, pqi_lockup_action_store);
static struct device_attribute *pqi_shost_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_firmware_version,
	&dev_attr_model,
	&dev_attr_serial_number,
	&dev_attr_vendor,
	&dev_attr_rescan,
	&dev_attr_lockup_action,
	NULL
};
static ssize_t pqi_unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u8 unique_id[16];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	if (device->is_physical_device) {
		memset(unique_id, 0, 8);
		memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
	} else {
		memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE,
		"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
		unique_id[0], unique_id[1], unique_id[2], unique_id[3],
		unique_id[4], unique_id[5], unique_id[6], unique_id[7],
		unique_id[8], unique_id[9], unique_id[10], unique_id[11],
		unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u8 lunid[8];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	memcpy(lunid, device->scsi3addr, sizeof(lunid));

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
#define MAX_PATHS	8

static ssize_t pqi_path_info_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index;
	char *active;
	u8 phys_connector[2];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	bay = device->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1 << i;
		if (i == device->active_path_index)
			active = "Active";
		else if (device->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"[%d:%d:%d:%d] %20.20s ",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun,
					scsi_device_type(device->devtype));

		if (device->devtype == TYPE_RAID ||
			pqi_is_logical_device(device))
			goto end_buffer;

		memcpy(&phys_connector, &device->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';

		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"PORT: %.2s ", phys_connector);

		box = device->box[i];
		if (box != 0 && box != 0xFF)
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"BOX: %hhu ", box);

		if ((device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC) &&
			pqi_expose_device(device))
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"BAY: %hhu ", bay);

end_buffer:
		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"%s\n", active);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return output_len;
}
static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device || !pqi_is_device_with_sas_address(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}
static ssize_t pqi_raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	char *raid_level;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	if (pqi_is_logical_device(device))
		raid_level = pqi_raid_level_to_string(device->raid_level);
	else
		raid_level = "N/A";

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}
static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	int raid_bypass_cnt;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
}
6544 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6545 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6546 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
6547 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6548 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
6549 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6550 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static struct device_attribute *pqi_sdev_attrs[] = {
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	&dev_attr_ssd_smart_path_enabled,
	&dev_attr_raid_level,
	&dev_attr_raid_bypass_cnt,
	NULL
};
static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.slave_configure = pqi_slave_configure,
	.slave_destroy = pqi_slave_destroy,
	.map_queues = pqi_map_queues,
	.sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_host_alloc failed for controller %u\n",
			ctrl_info->ctrl_id);
		return -ENOMEM;
	}

	shost->io_port = 0;
	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	shost->host_tagset = 1;
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_add_host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"add SAS host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto remove_host;
	}

	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);

free_host:
	scsi_host_put(shost);

	return rc;
}
static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	pqi_delete_sas_host(ctrl_info);

	shost = ctrl_info->scsi_host;
	if (!shost)
		return;

	scsi_remove_host(shost);
	scsi_host_put(shost);
}

static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	unsigned int timeout_msecs;
	union pqi_reset_register reset_reg;

	pqi_registers = ctrl_info->pqi_registers;
	timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;

	while (1) {
		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
		reset_reg.all_bits = readl(&pqi_registers->device_reset);
		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
			break;
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
		if (time_after(jiffies, timeout)) {
			rc = -ETIMEDOUT;
			break;
		}
	}

	return rc;
}
6685 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6688 union pqi_reset_register reset_reg;
6690 if (ctrl_info->pqi_reset_quiesce_supported) {
6691 rc = sis_pqi_reset_quiesce(ctrl_info);
6693 dev_err(&ctrl_info->pci_dev->dev,
6694 "PQI reset failed during quiesce with error %d\n",
6700 reset_reg.all_bits = 0;
6701 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6702 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6704 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6706 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6708 dev_err(&ctrl_info->pci_dev->dev,
6709 "PQI reset failed with error %d\n", rc);
6714 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6717 struct bmic_sense_subsystem_info *sense_info;
6719 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
6723 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6727 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6728 sizeof(sense_info->ctrl_serial_number));
6729 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6737 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6740 struct bmic_identify_controller *identify;
6742 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6746 rc = pqi_identify_controller(ctrl_info, identify);
6750 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6751 sizeof(identify->firmware_version));
6752 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6753 snprintf(ctrl_info->firmware_version +
6754 strlen(ctrl_info->firmware_version),
6755 sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
6756 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6758 memcpy(ctrl_info->model, identify->product_id,
6759 sizeof(identify->product_id));
6760 ctrl_info->model[sizeof(identify->product_id)] = '\0';
6762 memcpy(ctrl_info->vendor, identify->vendor_id,
6763 sizeof(identify->vendor_id));
6764 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6772 struct pqi_config_table_section_info {
6773 struct pqi_ctrl_info *ctrl_info;
6776 void __iomem *section_iomem_addr;
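/*
 * The firmware features section of the config table carries three
 * consecutive byte arrays, each num_elements bytes long:
 *   bytes [0, n)   - features supported by the firmware
 *   bytes [n, 2n)  - features requested by the host
 *   bytes [2n, 3n) - features actually enabled by the firmware
 * The helpers below address one bit per feature within those arrays,
 * e.g. feature bit 10 lives in byte 10 / 8 = 1 under mask 1 << (10 % 8).
 */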
6779 static inline bool pqi_is_firmware_feature_supported(
6780 struct pqi_config_table_firmware_features *firmware_features,
6781 unsigned int bit_position)
6783 unsigned int byte_index;
6785 byte_index = bit_position / BITS_PER_BYTE;
6787 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6790 return firmware_features->features_supported[byte_index] &
6791 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6794 static inline bool pqi_is_firmware_feature_enabled(
6795 struct pqi_config_table_firmware_features *firmware_features,
6796 void __iomem *firmware_features_iomem_addr,
6797 unsigned int bit_position)
6799 unsigned int byte_index;
6800 u8 __iomem *features_enabled_iomem_addr;
6802 byte_index = (bit_position / BITS_PER_BYTE) +
6803 (le16_to_cpu(firmware_features->num_elements) * 2);
6805 features_enabled_iomem_addr = firmware_features_iomem_addr +
6806 offsetof(struct pqi_config_table_firmware_features,
6807 features_supported) + byte_index;
6809 return *((__force u8 *)features_enabled_iomem_addr) &
6810 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6813 static inline void pqi_request_firmware_feature(
6814 struct pqi_config_table_firmware_features *firmware_features,
6815 unsigned int bit_position)
6817 unsigned int byte_index;
6819 byte_index = (bit_position / BITS_PER_BYTE) +
6820 le16_to_cpu(firmware_features->num_elements);
6822 firmware_features->features_supported[byte_index] |=
6823 (1 << (bit_position % BITS_PER_BYTE));
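/*
 * Notify the controller, via a vendor-specific IU, that the host has
 * modified the given range of config table sections.
 */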
6826 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6827 u16 first_section, u16 last_section)
6829 struct pqi_vendor_general_request request;
6831 memset(&request, 0, sizeof(request));
6833 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6834 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6835 &request.header.iu_length);
6836 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6837 &request.function_code);
6838 put_unaligned_le16(first_section,
6839 &request.data.config_table_update.first_section);
6840 put_unaligned_le16(last_section,
6841 &request.data.config_table_update.last_section);
6843 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6844 0, NULL, NO_TIMEOUT);
6847 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6848 struct pqi_config_table_firmware_features *firmware_features,
6849 void __iomem *firmware_features_iomem_addr)
6851 void *features_requested;
6852 void __iomem *features_requested_iomem_addr;
6854 features_requested = firmware_features->features_supported +
6855 le16_to_cpu(firmware_features->num_elements);
6857 features_requested_iomem_addr = firmware_features_iomem_addr +
6858 (features_requested - (void *)firmware_features);
6860 memcpy_toio(features_requested_iomem_addr, features_requested,
6861 le16_to_cpu(firmware_features->num_elements));
6863 return pqi_config_table_update(ctrl_info,
6864 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6865 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6868 struct pqi_firmware_feature {
6870 unsigned int feature_bit;
6873 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6874 struct pqi_firmware_feature *firmware_feature);
6877 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6878 struct pqi_firmware_feature *firmware_feature)
6880 if (!firmware_feature->supported) {
6881 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6882 firmware_feature->feature_name);
6886 if (firmware_feature->enabled) {
6887 dev_info(&ctrl_info->pci_dev->dev,
6888 "%s enabled\n", firmware_feature->feature_name);
6892 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6893 firmware_feature->feature_name);
6896 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
6897 struct pqi_firmware_feature *firmware_feature)
6899 switch (firmware_feature->feature_bit) {
6900 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
6901 ctrl_info->soft_reset_handshake_supported =
6902 firmware_feature->enabled;
6904 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
6905 ctrl_info->raid_iu_timeout_supported =
6906 firmware_feature->enabled;
6908 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
6909 ctrl_info->tmf_iu_timeout_supported =
6910 firmware_feature->enabled;
6914 pqi_firmware_feature_status(ctrl_info, firmware_feature);
6917 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6918 struct pqi_firmware_feature *firmware_feature)
6920 if (firmware_feature->feature_status)
6921 firmware_feature->feature_status(ctrl_info, firmware_feature);
6924 static DEFINE_MUTEX(pqi_firmware_features_mutex);
6926 static struct pqi_firmware_feature pqi_firmware_features[] = {
6928 .feature_name = "Online Firmware Activation",
6929 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6930 .feature_status = pqi_firmware_feature_status,
6933 .feature_name = "Serial Management Protocol",
6934 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6935 .feature_status = pqi_firmware_feature_status,
6938 .feature_name = "New Soft Reset Handshake",
6939 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6940 .feature_status = pqi_ctrl_update_feature_flags,
6943 .feature_name = "RAID IU Timeout",
6944 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
6945 .feature_status = pqi_ctrl_update_feature_flags,
6948 .feature_name = "TMF IU Timeout",
6949 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
6950 .feature_status = pqi_ctrl_update_feature_flags,
6954 static void pqi_process_firmware_features(
6955 struct pqi_config_table_section_info *section_info)
6958 struct pqi_ctrl_info *ctrl_info;
6959 struct pqi_config_table_firmware_features *firmware_features;
6960 void __iomem *firmware_features_iomem_addr;
6962 unsigned int num_features_supported;
6964 ctrl_info = section_info->ctrl_info;
6965 firmware_features = section_info->section;
6966 firmware_features_iomem_addr = section_info->section_iomem_addr;
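/*
 * Pass 1: note which of the driver's known features the firmware
 * supports.  Pass 2: request every supported feature.  Pass 3: read
 * back which features the firmware actually enabled and report them.
 */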
6968 for (i = 0, num_features_supported = 0;
6969 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6970 if (pqi_is_firmware_feature_supported(firmware_features,
6971 pqi_firmware_features[i].feature_bit)) {
6972 pqi_firmware_features[i].supported = true;
6973 num_features_supported++;
6975 pqi_firmware_feature_update(ctrl_info,
6976 &pqi_firmware_features[i]);
6980 if (num_features_supported == 0)
6983 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6984 if (!pqi_firmware_features[i].supported)
6986 pqi_request_firmware_feature(firmware_features,
6987 pqi_firmware_features[i].feature_bit);
6990 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6991 firmware_features_iomem_addr);
6993 dev_err(&ctrl_info->pci_dev->dev,
6994 "failed to enable firmware features in PQI configuration table\n");
6995 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6996 if (!pqi_firmware_features[i].supported)
6998 pqi_firmware_feature_update(ctrl_info,
6999 &pqi_firmware_features[i]);
7004 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7005 if (!pqi_firmware_features[i].supported)
7007 if (pqi_is_firmware_feature_enabled(firmware_features,
7008 firmware_features_iomem_addr,
7009 pqi_firmware_features[i].feature_bit)) {
7010 pqi_firmware_features[i].enabled = true;
7012 pqi_firmware_feature_update(ctrl_info,
7013 &pqi_firmware_features[i]);
7017 static void pqi_init_firmware_features(void)
7021 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7022 pqi_firmware_features[i].supported = false;
7023 pqi_firmware_features[i].enabled = false;
7027 static void pqi_process_firmware_features_section(
7028 struct pqi_config_table_section_info *section_info)
7030 mutex_lock(&pqi_firmware_features_mutex);
7031 pqi_init_firmware_features();
7032 pqi_process_firmware_features(section_info);
7033 mutex_unlock(&pqi_firmware_features_mutex);
7036 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7040 void __iomem *table_iomem_addr;
7041 struct pqi_config_table *config_table;
7042 struct pqi_config_table_section_header *section;
7043 struct pqi_config_table_section_info section_info;
7045 table_length = ctrl_info->config_table_length;
7046 if (table_length == 0)
7049 config_table = kmalloc(table_length, GFP_KERNEL);
7050 if (!config_table) {
7051 dev_err(&ctrl_info->pci_dev->dev,
7052 "failed to allocate memory for PQI configuration table\n");
7057 * Copy the config table contents from I/O memory space into the kernel memory allocated above.
7060 table_iomem_addr = ctrl_info->iomem_base +
7061 ctrl_info->config_table_offset;
7062 memcpy_fromio(config_table, table_iomem_addr, table_length);
7064 section_info.ctrl_info = ctrl_info;
7066 get_unaligned_le32(&config_table->first_section_offset);
7068 while (section_offset) {
7069 section = (void *)config_table + section_offset;
7071 section_info.section = section;
7072 section_info.section_offset = section_offset;
7073 section_info.section_iomem_addr =
7074 table_iomem_addr + section_offset;
7076 switch (get_unaligned_le16(&section->section_id)) {
7077 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7078 pqi_process_firmware_features_section(&section_info);
7080 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7081 if (pqi_disable_heartbeat)
7082 dev_warn(&ctrl_info->pci_dev->dev,
7083 "heartbeat disabled by module parameter\n");
7085 ctrl_info->heartbeat_counter =
7089 struct pqi_config_table_heartbeat,
7092 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7093 ctrl_info->soft_reset_status =
7096 offsetof(struct pqi_config_table_soft_reset,
7102 get_unaligned_le16(&section->next_section_offset);
7105 kfree(config_table);
7110 /* Switches the controller from PQI mode back into SIS mode. */
7112 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7116 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7117 rc = pqi_reset(ctrl_info);
7120 rc = sis_reenable_sis_mode(ctrl_info);
7122 dev_err(&ctrl_info->pci_dev->dev,
7123 "re-enabling SIS mode failed with error %d\n", rc);
7126 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7132 * If the controller isn't already in SIS mode, this function forces it into SIS mode.
7136 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7138 if (!sis_is_firmware_running(ctrl_info))
7141 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7144 if (sis_is_kernel_up(ctrl_info)) {
7145 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7149 return pqi_revert_to_sis_mode(ctrl_info);
7152 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
7154 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7158 if (reset_devices) {
7159 sis_soft_reset(ctrl_info);
7160 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7162 rc = pqi_force_sis_mode(ctrl_info);
7168 * Wait until the controller is ready to start accepting SIS requests.
7171 rc = sis_wait_for_ctrl_ready(ctrl_info);
7176 * Get the controller properties. This allows us to determine
7177 * whether or not it supports PQI mode.
7179 rc = sis_get_ctrl_properties(ctrl_info);
7181 dev_err(&ctrl_info->pci_dev->dev,
7182 "error obtaining controller properties\n");
7186 rc = sis_get_pqi_capabilities(ctrl_info);
7188 dev_err(&ctrl_info->pci_dev->dev,
7189 "error obtaining controller capabilities\n");
7193 if (reset_devices) {
7194 if (ctrl_info->max_outstanding_requests >
7195 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
7196 ctrl_info->max_outstanding_requests =
7197 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
7199 if (ctrl_info->max_outstanding_requests >
7200 PQI_MAX_OUTSTANDING_REQUESTS)
7201 ctrl_info->max_outstanding_requests =
7202 PQI_MAX_OUTSTANDING_REQUESTS;
7205 pqi_calculate_io_resources(ctrl_info);
7207 rc = pqi_alloc_error_buffer(ctrl_info);
7209 dev_err(&ctrl_info->pci_dev->dev,
7210 "failed to allocate PQI error buffer\n");
7215 * If the function we are about to call succeeds, the
7216 * controller will transition from legacy SIS mode into PQI mode.
7219 rc = sis_init_base_struct_addr(ctrl_info);
7221 dev_err(&ctrl_info->pci_dev->dev,
7222 "error initializing PQI mode\n");
7226 /* Wait for the controller to complete the SIS -> PQI transition. */
7227 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7229 dev_err(&ctrl_info->pci_dev->dev,
7230 "transition to PQI mode failed\n");
7234 /* From here on, we are running in PQI mode. */
7235 ctrl_info->pqi_mode_enabled = true;
7236 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7238 rc = pqi_alloc_admin_queues(ctrl_info);
7240 dev_err(&ctrl_info->pci_dev->dev,
7241 "failed to allocate admin queues\n");
7245 rc = pqi_create_admin_queues(ctrl_info);
7247 dev_err(&ctrl_info->pci_dev->dev,
7248 "error creating admin queues\n");
7252 rc = pqi_report_device_capability(ctrl_info);
7254 dev_err(&ctrl_info->pci_dev->dev,
7255 "obtaining device capability failed\n");
7259 rc = pqi_validate_device_capability(ctrl_info);
7263 pqi_calculate_queue_resources(ctrl_info);
7265 rc = pqi_enable_msix_interrupts(ctrl_info);
7269 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7270 ctrl_info->max_msix_vectors =
7271 ctrl_info->num_msix_vectors_enabled;
7272 pqi_calculate_queue_resources(ctrl_info);
7275 rc = pqi_alloc_io_resources(ctrl_info);
7279 rc = pqi_alloc_operational_queues(ctrl_info);
7281 dev_err(&ctrl_info->pci_dev->dev,
7282 "failed to allocate operational queues\n");
7286 pqi_init_operational_queues(ctrl_info);
7288 rc = pqi_request_irqs(ctrl_info);
7292 rc = pqi_create_queues(ctrl_info);
7296 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7298 ctrl_info->controller_online = true;
7300 rc = pqi_process_config_table(ctrl_info);
7304 pqi_start_heartbeat_timer(ctrl_info);
7306 rc = pqi_enable_events(ctrl_info);
7308 dev_err(&ctrl_info->pci_dev->dev,
7309 "error enabling events\n");
7313 /* Register with the SCSI subsystem. */
7314 rc = pqi_register_scsi(ctrl_info);
7318 rc = pqi_get_ctrl_product_details(ctrl_info);
7320 dev_err(&ctrl_info->pci_dev->dev,
7321 "error obtaining product details\n");
7325 rc = pqi_get_ctrl_serial_number(ctrl_info);
7327 dev_err(&ctrl_info->pci_dev->dev,
7328 "error obtaining ctrl serial number\n");
7332 rc = pqi_set_diag_rescan(ctrl_info);
7334 dev_err(&ctrl_info->pci_dev->dev,
7335 "error enabling multi-lun rescan\n");
7339 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7341 dev_err(&ctrl_info->pci_dev->dev,
7342 "error updating host wellness\n");
7346 pqi_schedule_update_time_worker(ctrl_info);
7348 pqi_scan_scsi_devices(ctrl_info);
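/*
 * Reset every queue's producer/consumer indices to a pristine state
 * before the queues are re-created on resume or after an Online
 * Firmware Activation restart.
 */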
7353 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7356 struct pqi_admin_queues *admin_queues;
7357 struct pqi_event_queue *event_queue;
7359 admin_queues = &ctrl_info->admin_queues;
7360 admin_queues->iq_pi_copy = 0;
7361 admin_queues->oq_ci_copy = 0;
7362 writel(0, admin_queues->oq_pi);
7364 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7365 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7366 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7367 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7369 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7370 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7371 writel(0, ctrl_info->queue_groups[i].oq_pi);
7374 event_queue = &ctrl_info->event_queue;
7375 writel(0, event_queue->oq_pi);
7376 event_queue->oq_ci_copy = 0;
7379 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7383 rc = pqi_force_sis_mode(ctrl_info);
7388 * Wait until the controller is ready to start accepting SIS requests.
7391 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7396 * Get the controller properties. This allows us to determine
7397 * whether or not it supports PQI mode.
7399 rc = sis_get_ctrl_properties(ctrl_info);
7401 dev_err(&ctrl_info->pci_dev->dev,
7402 "error obtaining controller properties\n");
7406 rc = sis_get_pqi_capabilities(ctrl_info);
7408 dev_err(&ctrl_info->pci_dev->dev,
7409 "error obtaining controller capabilities\n");
7414 * If the function we are about to call succeeds, the
7415 * controller will transition from legacy SIS mode into PQI mode.
7418 rc = sis_init_base_struct_addr(ctrl_info);
7420 dev_err(&ctrl_info->pci_dev->dev,
7421 "error initializing PQI mode\n");
7425 /* Wait for the controller to complete the SIS -> PQI transition. */
7426 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7428 dev_err(&ctrl_info->pci_dev->dev,
7429 "transition to PQI mode failed\n");
7433 /* From here on, we are running in PQI mode. */
7434 ctrl_info->pqi_mode_enabled = true;
7435 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7437 pqi_reinit_queues(ctrl_info);
7439 rc = pqi_create_admin_queues(ctrl_info);
7441 dev_err(&ctrl_info->pci_dev->dev,
7442 "error creating admin queues\n");
7446 rc = pqi_create_queues(ctrl_info);
7450 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7452 ctrl_info->controller_online = true;
7453 pqi_ctrl_unblock_requests(ctrl_info);
7455 rc = pqi_process_config_table(ctrl_info);
7459 pqi_start_heartbeat_timer(ctrl_info);
7461 rc = pqi_enable_events(ctrl_info);
7463 dev_err(&ctrl_info->pci_dev->dev,
7464 "error enabling events\n");
7468 rc = pqi_get_ctrl_product_details(ctrl_info);
7470 dev_err(&ctrl_info->pci_dev->dev,
7471 "error obtaining product details\n");
7475 rc = pqi_set_diag_rescan(ctrl_info);
7477 dev_err(&ctrl_info->pci_dev->dev,
7478 "error enabling multi-lun rescan\n");
7482 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7484 dev_err(&ctrl_info->pci_dev->dev,
7485 "error updating host wellness\n");
7489 pqi_schedule_update_time_worker(ctrl_info);
7491 pqi_scan_scsi_devices(ctrl_info);
7496 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
7501 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7502 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
7504 return pcibios_err_to_errno(rc);
7507 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7512 rc = pci_enable_device(ctrl_info->pci_dev);
7514 dev_err(&ctrl_info->pci_dev->dev,
7515 "failed to enable PCI device\n");
7519 if (sizeof(dma_addr_t) > 4)
7520 mask = DMA_BIT_MASK(64);
7522 mask = DMA_BIT_MASK(32);
7524 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
7526 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7527 goto disable_device;
7530 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
7532 dev_err(&ctrl_info->pci_dev->dev,
7533 "failed to obtain PCI resources\n");
7534 goto disable_device;
7537 ctrl_info->iomem_base = ioremap(pci_resource_start(
7538 ctrl_info->pci_dev, 0),
7539 sizeof(struct pqi_ctrl_registers));
7540 if (!ctrl_info->iomem_base) {
7541 dev_err(&ctrl_info->pci_dev->dev,
7542 "failed to map memory for controller registers\n");
7544 goto release_regions;
7547 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
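/* Completion timeout value 0x6 selects the 65 ms - 210 ms range in DEVCTL2. */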
7549 /* Increase the PCIe completion timeout. */
7550 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7551 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
7553 dev_err(&ctrl_info->pci_dev->dev,
7554 "failed to set PCIe completion timeout\n");
7555 goto release_regions;
7558 /* Enable bus mastering. */
7559 pci_set_master(ctrl_info->pci_dev);
7561 ctrl_info->registers = ctrl_info->iomem_base;
7562 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7564 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7569 pci_release_regions(ctrl_info->pci_dev);
7571 pci_disable_device(ctrl_info->pci_dev);
7576 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7578 iounmap(ctrl_info->iomem_base);
7579 pci_release_regions(ctrl_info->pci_dev);
7580 if (pci_is_enabled(ctrl_info->pci_dev))
7581 pci_disable_device(ctrl_info->pci_dev);
7582 pci_set_drvdata(ctrl_info->pci_dev, NULL);
7585 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7587 struct pqi_ctrl_info *ctrl_info;
7589 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7590 GFP_KERNEL, numa_node);
7594 mutex_init(&ctrl_info->scan_mutex);
7595 mutex_init(&ctrl_info->lun_reset_mutex);
7596 mutex_init(&ctrl_info->ofa_mutex);
7598 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7599 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7601 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7602 atomic_set(&ctrl_info->num_interrupts, 0);
7603 atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
7605 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7606 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7608 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
7609 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
7611 sema_init(&ctrl_info->sync_request_sem,
7612 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7613 init_waitqueue_head(&ctrl_info->block_requests_wait);
7615 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7616 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7617 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7618 pqi_raid_bypass_retry_worker);
7620 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
7621 ctrl_info->irq_mode = IRQ_MODE_NONE;
7622 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7627 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7632 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7634 pqi_free_irqs(ctrl_info);
7635 pqi_disable_msix_interrupts(ctrl_info);
7638 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7640 pqi_stop_heartbeat_timer(ctrl_info);
7641 pqi_free_interrupts(ctrl_info);
7642 if (ctrl_info->queue_memory_base)
7643 dma_free_coherent(&ctrl_info->pci_dev->dev,
7644 ctrl_info->queue_memory_length,
7645 ctrl_info->queue_memory_base,
7646 ctrl_info->queue_memory_base_dma_handle);
7647 if (ctrl_info->admin_queue_memory_base)
7648 dma_free_coherent(&ctrl_info->pci_dev->dev,
7649 ctrl_info->admin_queue_memory_length,
7650 ctrl_info->admin_queue_memory_base,
7651 ctrl_info->admin_queue_memory_base_dma_handle);
7652 pqi_free_all_io_requests(ctrl_info);
7653 if (ctrl_info->error_buffer)
7654 dma_free_coherent(&ctrl_info->pci_dev->dev,
7655 ctrl_info->error_buffer_length,
7656 ctrl_info->error_buffer,
7657 ctrl_info->error_buffer_dma_handle);
7658 if (ctrl_info->iomem_base)
7659 pqi_cleanup_pci_init(ctrl_info);
7660 pqi_free_ctrl_info(ctrl_info);
7663 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7665 pqi_cancel_rescan_worker(ctrl_info);
7666 pqi_cancel_update_time_worker(ctrl_info);
7667 pqi_unregister_scsi(ctrl_info);
7668 if (ctrl_info->pqi_mode_enabled)
7669 pqi_revert_to_sis_mode(ctrl_info);
7670 pqi_free_ctrl_resources(ctrl_info);
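/*
 * Quiesce the controller for Online Firmware Activation: stop background
 * work, drain all outstanding I/O, then drop back to SIS mode so the
 * firmware image can be activated underneath the driver.
 */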
7673 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7675 pqi_cancel_update_time_worker(ctrl_info);
7676 pqi_cancel_rescan_worker(ctrl_info);
7677 pqi_wait_until_lun_reset_finished(ctrl_info);
7678 pqi_wait_until_scan_finished(ctrl_info);
7679 pqi_ctrl_ofa_start(ctrl_info);
7680 pqi_ctrl_block_requests(ctrl_info);
7681 pqi_ctrl_wait_until_quiesced(ctrl_info);
7682 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7683 pqi_fail_io_queued_for_all_devices(ctrl_info);
7684 pqi_wait_until_inbound_queues_empty(ctrl_info);
7685 pqi_stop_heartbeat_timer(ctrl_info);
7686 ctrl_info->pqi_mode_enabled = false;
7687 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7690 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7692 pqi_ofa_free_host_buffer(ctrl_info);
7693 ctrl_info->pqi_mode_enabled = true;
7694 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7695 ctrl_info->controller_online = true;
7696 pqi_ctrl_unblock_requests(ctrl_info);
7697 pqi_start_heartbeat_timer(ctrl_info);
7698 pqi_schedule_update_time_worker(ctrl_info);
7699 pqi_clear_soft_reset_status(ctrl_info,
7700 PQI_SOFT_RESET_ABORT);
7701 pqi_scan_scsi_devices(ctrl_info);
7704 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7705 u32 total_size, u32 chunk_size)
7710 struct pqi_sg_descriptor *mem_descriptor = NULL;
7712 struct pqi_ofa_memory *ofap;
7714 dev = &ctrl_info->pci_dev->dev;
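/*
 * Round up: sg_count = DIV_ROUND_UP(total_size, chunk_size).  The check
 * below re-verifies the math and bails out if the 32-bit multiply
 * overflowed.
 */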
7716 sg_count = (total_size + chunk_size - 1);
7717 sg_count /= chunk_size;
7719 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7721 if (sg_count * chunk_size < total_size)
7724 ctrl_info->pqi_ofa_chunk_virt_addr =
7725 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7726 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7729 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7730 dma_addr_t dma_handle;
7732 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7733 dma_alloc_coherent(dev, chunk_size, &dma_handle,
7736 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7739 mem_descriptor = &ofap->sg_descriptor[i];
7740 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
7741 put_unaligned_le32(chunk_size, &mem_descriptor->length);
7744 if (!size || size < total_size)
7745 goto out_free_chunks;
7747 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7748 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7749 put_unaligned_le32(size, &ofap->bytes_allocated);
7755 mem_descriptor = &ofap->sg_descriptor[i];
7756 dma_free_coherent(dev, chunk_size,
7757 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7758 get_unaligned_le64(&mem_descriptor->address));
7760 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7763 put_unaligned_le32(0, &ofap->bytes_allocated);
7767 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7773 total_size = le32_to_cpu(
7774 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7775 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
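/*
 * Try the largest possible chunk first and halve on allocation failure;
 * min_chunk_size keeps the resulting chunk count within
 * PQI_OFA_MAX_SG_DESCRIPTORS.
 */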
7777 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7778 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7784 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7785 u32 bytes_requested)
7787 struct pqi_ofa_memory *pqi_ofa_memory;
7790 dev = &ctrl_info->pci_dev->dev;
7791 pqi_ofa_memory = dma_alloc_coherent(dev,
7792 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7793 &ctrl_info->pqi_ofa_mem_dma_handle,
7796 if (!pqi_ofa_memory)
7799 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7800 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7801 sizeof(pqi_ofa_memory->signature));
7802 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7804 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7806 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7807 dev_err(dev, "Failed to allocate host buffer of size = %u",
7814 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7817 struct pqi_sg_descriptor *mem_descriptor;
7818 struct pqi_ofa_memory *ofap;
7820 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7825 if (!ofap->bytes_allocated)
7828 mem_descriptor = ofap->sg_descriptor;
7830 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7832 dma_free_coherent(&ctrl_info->pci_dev->dev,
7833 get_unaligned_le32(&mem_descriptor[i].length),
7834 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7835 get_unaligned_le64(&mem_descriptor[i].address));
7837 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7840 dma_free_coherent(&ctrl_info->pci_dev->dev,
7841 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7842 ctrl_info->pqi_ofa_mem_dma_handle);
7843 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7846 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7848 struct pqi_vendor_general_request request;
7850 struct pqi_ofa_memory *ofap;
7852 memset(&request, 0, sizeof(request));
7854 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7856 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7857 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7858 &request.header.iu_length);
7859 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7860 &request.function_code);
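/* The buffer length covers the OFA descriptor header plus one SG descriptor per chunk. */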
7863 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7864 get_unaligned_le16(&ofap->num_memory_descriptors) *
7865 sizeof(struct pqi_sg_descriptor);
7867 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7868 &request.data.ofa_memory_allocation.buffer_address);
7869 put_unaligned_le32(size,
7870 &request.data.ofa_memory_allocation.buffer_length);
7874 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7875 0, NULL, NO_TIMEOUT);
7878 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7880 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7881 return pqi_ctrl_init_resume(ctrl_info);
7884 static void pqi_perform_lockup_action(void)
7886 switch (pqi_lockup_action) {
7888 panic("FATAL: Smart Family Controller lockup detected");
7891 emergency_restart();
7899 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7900 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7901 .status = SAM_STAT_CHECK_CONDITION,
7904 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
7907 struct pqi_io_request *io_request;
7908 struct scsi_cmnd *scmd;
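/*
 * Commands issued through the SCSI midlayer are failed with
 * DID_NO_CONNECT; driver-internal requests get -ENXIO plus a synthetic
 * check-condition error block.
 */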
7910 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7911 io_request = &ctrl_info->io_request_pool[i];
7912 if (atomic_read(&io_request->refcount) == 0)
7915 scmd = io_request->scmd;
7917 set_host_byte(scmd, DID_NO_CONNECT);
7919 io_request->status = -ENXIO;
7920 io_request->error_info =
7921 &pqi_ctrl_offline_raid_error_info;
7924 io_request->io_complete_callback(io_request,
7925 io_request->context);
7929 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
7931 pqi_perform_lockup_action();
7932 pqi_stop_heartbeat_timer(ctrl_info);
7933 pqi_free_interrupts(ctrl_info);
7934 pqi_cancel_rescan_worker(ctrl_info);
7935 pqi_cancel_update_time_worker(ctrl_info);
7936 pqi_ctrl_wait_until_quiesced(ctrl_info);
7937 pqi_fail_all_outstanding_requests(ctrl_info);
7938 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7939 pqi_ctrl_unblock_requests(ctrl_info);
7942 static void pqi_ctrl_offline_worker(struct work_struct *work)
7944 struct pqi_ctrl_info *ctrl_info;
7946 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7947 pqi_take_ctrl_offline_deferred(ctrl_info);
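/*
 * Mark the controller offline and kick the deferred worker; the heavy
 * teardown runs in process context because this routine may be invoked
 * from timer or interrupt paths.
 */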
7950 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7952 if (!ctrl_info->controller_online)
7955 ctrl_info->controller_online = false;
7956 ctrl_info->pqi_mode_enabled = false;
7957 pqi_ctrl_block_requests(ctrl_info);
7958 if (!pqi_disable_ctrl_shutdown)
7959 sis_shutdown_ctrl(ctrl_info);
7960 pci_disable_device(ctrl_info->pci_dev);
7961 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
7962 schedule_work(&ctrl_info->ctrl_offline_work);
7965 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
7966 const struct pci_device_id *id)
7968 char *ctrl_description;
7970 if (id->driver_data)
7971 ctrl_description = (char *)id->driver_data;
7973 ctrl_description = "Microsemi Smart Family Controller";
7975 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
7978 static int pqi_pci_probe(struct pci_dev *pci_dev,
7979 const struct pci_device_id *id)
7983 struct pqi_ctrl_info *ctrl_info;
7985 pqi_print_ctrl_info(pci_dev, id);
7987 if (pqi_disable_device_id_wildcards &&
7988 id->subvendor == PCI_ANY_ID &&
7989 id->subdevice == PCI_ANY_ID) {
7990 dev_warn(&pci_dev->dev,
7991 "controller not probed because device ID wildcards are disabled\n");
7995 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
7996 dev_warn(&pci_dev->dev,
7997 "controller device ID matched using wildcards\n");
7999 node = dev_to_node(&pci_dev->dev);
8000 if (node == NUMA_NO_NODE) {
8001 cp_node = cpu_to_node(0);
8002 if (cp_node == NUMA_NO_NODE)
8004 set_dev_node(&pci_dev->dev, cp_node);
8007 ctrl_info = pqi_alloc_ctrl_info(node);
8009 dev_err(&pci_dev->dev,
8010 "failed to allocate controller info block\n");
8014 ctrl_info->pci_dev = pci_dev;
8016 rc = pqi_pci_init(ctrl_info);
8020 rc = pqi_ctrl_init(ctrl_info);
8027 pqi_remove_ctrl(ctrl_info);
8032 static void pqi_pci_remove(struct pci_dev *pci_dev)
8034 struct pqi_ctrl_info *ctrl_info;
8036 ctrl_info = pci_get_drvdata(pci_dev);
8040 ctrl_info->in_shutdown = true;
8042 pqi_remove_ctrl(ctrl_info);
8045 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8048 struct pqi_io_request *io_request;
8049 struct scsi_cmnd *scmd;
8051 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8052 io_request = &ctrl_info->io_request_pool[i];
8053 if (atomic_read(&io_request->refcount) == 0)
8055 scmd = io_request->scmd;
8056 WARN_ON(scmd != NULL); /* IO command from SML */
8057 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
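/*
 * Orderly shutdown: quiesce new work, drain outstanding I/O, flush the
 * controller's write cache, then reset the controller so it restarts
 * clean.
 */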
8061 static void pqi_shutdown(struct pci_dev *pci_dev)
8064 struct pqi_ctrl_info *ctrl_info;
8066 ctrl_info = pci_get_drvdata(pci_dev);
8068 dev_err(&pci_dev->dev,
8069 "cache could not be flushed\n");
8073 pqi_disable_events(ctrl_info);
8074 pqi_wait_until_ofa_finished(ctrl_info);
8075 pqi_cancel_update_time_worker(ctrl_info);
8076 pqi_cancel_rescan_worker(ctrl_info);
8077 pqi_cancel_event_worker(ctrl_info);
8079 pqi_ctrl_shutdown_start(ctrl_info);
8080 pqi_ctrl_wait_until_quiesced(ctrl_info);
8082 rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8084 dev_err(&pci_dev->dev,
8085 "wait for pending I/O failed\n");
8089 pqi_ctrl_block_device_reset(ctrl_info);
8090 pqi_wait_until_lun_reset_finished(ctrl_info);
8093 * Write all data in the controller's battery-backed cache to storage.
8096 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
8098 dev_err(&pci_dev->dev,
8099 "unable to flush controller cache\n");
8101 pqi_ctrl_block_requests(ctrl_info);
8103 rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
8105 dev_err(&pci_dev->dev,
8106 "wait for pending sync cmds failed\n");
8110 pqi_crash_if_pending_command(ctrl_info);
8111 pqi_reset(ctrl_info);
8114 static void pqi_process_lockup_action_param(void)
8118 if (!pqi_lockup_action_param)
8121 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
8122 if (strcmp(pqi_lockup_action_param,
8123 pqi_lockup_actions[i].name) == 0) {
8124 pqi_lockup_action = pqi_lockup_actions[i].action;
8129 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
8130 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8133 static void pqi_process_module_params(void)
8135 pqi_process_lockup_action_param();
8138 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
8140 struct pqi_ctrl_info *ctrl_info;
8142 ctrl_info = pci_get_drvdata(pci_dev);
8144 pqi_disable_events(ctrl_info);
8145 pqi_cancel_update_time_worker(ctrl_info);
8146 pqi_cancel_rescan_worker(ctrl_info);
8147 pqi_wait_until_scan_finished(ctrl_info);
8148 pqi_wait_until_lun_reset_finished(ctrl_info);
8149 pqi_wait_until_ofa_finished(ctrl_info);
8150 pqi_flush_cache(ctrl_info, SUSPEND);
8151 pqi_ctrl_block_requests(ctrl_info);
8152 pqi_ctrl_wait_until_quiesced(ctrl_info);
8153 pqi_wait_until_inbound_queues_empty(ctrl_info);
8154 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8155 pqi_stop_heartbeat_timer(ctrl_info);
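/* For a hibernation "freeze" the controller keeps power, so no D-state change is needed. */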
8157 if (state.event == PM_EVENT_FREEZE)
8160 pci_save_state(pci_dev);
8161 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
8163 ctrl_info->controller_online = false;
8164 ctrl_info->pqi_mode_enabled = false;
8169 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
8172 struct pqi_ctrl_info *ctrl_info;
8174 ctrl_info = pci_get_drvdata(pci_dev);
8176 if (pci_dev->current_state != PCI_D0) {
8177 ctrl_info->max_hw_queue_index = 0;
8178 pqi_free_interrupts(ctrl_info);
8179 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
8180 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
8181 IRQF_SHARED, DRIVER_NAME_SHORT,
8182 &ctrl_info->queue_groups[0]);
8184 dev_err(&ctrl_info->pci_dev->dev,
8185 "irq %u init failed with error %d\n",
8189 pqi_start_heartbeat_timer(ctrl_info);
8190 pqi_ctrl_unblock_requests(ctrl_info);
8194 pci_set_power_state(pci_dev, PCI_D0);
8195 pci_restore_state(pci_dev);
8197 return pqi_ctrl_init_resume(ctrl_info);
8200 /* Define the PCI IDs for the controllers that we support. */
8201 static const struct pci_device_id pqi_pci_id_table[] = {
8203 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8207 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8211 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8215 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8219 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8223 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8227 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8231 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8235 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8239 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8243 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8247 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8251 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8255 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8259 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8263 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8267 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8271 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8275 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8279 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8283 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8287 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8291 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8295 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8299 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8303 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8307 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8311 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8315 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8319 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8323 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8327 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8331 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8335 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8339 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8343 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8347 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8348 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
8351 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8352 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
8355 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8356 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
8359 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8360 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
8363 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8364 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
8367 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8368 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
8371 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8372 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
8375 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8376 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
8379 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8380 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
8383 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8384 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8387 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8388 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
8391 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8392 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
8395 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8396 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
8399 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8400 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8403 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8404 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
8407 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8408 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
8411 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8412 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
8415 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8416 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
8419 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8420 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
8423 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8424 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
8427 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8428 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
8431 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8432 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
8435 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8436 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
8439 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8440 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
8443 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8444 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
8447 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8448 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
8451 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8452 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
8455 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8456 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
8459 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8460 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
8463 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8464 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
8467 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8468 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
8471 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8472 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
8475 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8476 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
8479 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8480 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
8483 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8484 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
8487 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8488 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
8491 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8492 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
8495 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8496 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
8499 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8500 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
8503 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8504 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
8507 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8508 PCI_VENDOR_ID_ADAPTEC2, 0x1430)
8511 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8512 PCI_VENDOR_ID_ADAPTEC2, 0x1440)
8515 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8516 PCI_VENDOR_ID_ADAPTEC2, 0x1441)
8519 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8520 PCI_VENDOR_ID_ADAPTEC2, 0x1450)
8523 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8524 PCI_VENDOR_ID_ADAPTEC2, 0x1452)
8527 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8528 PCI_VENDOR_ID_ADAPTEC2, 0x1460)
8531 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8532 PCI_VENDOR_ID_ADAPTEC2, 0x1461)
8535 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8536 PCI_VENDOR_ID_ADAPTEC2, 0x1462)
8539 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8540 PCI_VENDOR_ID_ADAPTEC2, 0x1470)
8543 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8544 PCI_VENDOR_ID_ADAPTEC2, 0x1471)
8547 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8548 PCI_VENDOR_ID_ADAPTEC2, 0x1472)
8551 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8552 PCI_VENDOR_ID_ADAPTEC2, 0x1480)
8555 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8556 PCI_VENDOR_ID_ADAPTEC2, 0x1490)
8559 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8560 PCI_VENDOR_ID_ADAPTEC2, 0x1491)
8563 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8564 PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
8567 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8568 PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
8571 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8572 PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
8575 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8576 PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
8579 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8580 PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
8583 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8584 PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
8587 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8588 PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
8591 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8592 PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
8595 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8596 PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
8599 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8600 PCI_VENDOR_ID_ADVANTECH, 0x8312)
8603 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8604 PCI_VENDOR_ID_DELL, 0x1fe0)
8607 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8608 PCI_VENDOR_ID_HP, 0x0600)
8611 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8612 PCI_VENDOR_ID_HP, 0x0601)
8615 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8616 PCI_VENDOR_ID_HP, 0x0602)
8619 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8620 PCI_VENDOR_ID_HP, 0x0603)
8623 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8624 PCI_VENDOR_ID_HP, 0x0609)
8627 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8628 PCI_VENDOR_ID_HP, 0x0650)
8631 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8632 PCI_VENDOR_ID_HP, 0x0651)
8635 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8636 PCI_VENDOR_ID_HP, 0x0652)
8639 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8640 PCI_VENDOR_ID_HP, 0x0653)
8643 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8644 PCI_VENDOR_ID_HP, 0x0654)
8647 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8648 PCI_VENDOR_ID_HP, 0x0655)
8651 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8652 PCI_VENDOR_ID_HP, 0x0700)
8655 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8656 PCI_VENDOR_ID_HP, 0x0701)
8659 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8660 PCI_VENDOR_ID_HP, 0x1001)
8663 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8664 PCI_VENDOR_ID_HP, 0x1002)
8667 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8668 PCI_VENDOR_ID_HP, 0x1100)
8671 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8672 PCI_VENDOR_ID_HP, 0x1101)
8675 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8679 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8683 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8687 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8691 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8695 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8699 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8703 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8707 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8708 PCI_VENDOR_ID_GIGABYTE, 0x1000)
8711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8712 PCI_ANY_ID, PCI_ANY_ID)
8717 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
8719 static struct pci_driver pqi_pci_driver = {
8720 .name = DRIVER_NAME_SHORT,
8721 .id_table = pqi_pci_id_table,
8722 .probe = pqi_pci_probe,
8723 .remove = pqi_pci_remove,
8724 .shutdown = pqi_shutdown,
8725 #if defined(CONFIG_PM)
8726 .suspend = pqi_suspend,
8727 .resume = pqi_resume,
8731 static int __init pqi_init(void)
8735 pr_info(DRIVER_NAME "\n");
8737 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
8738 if (!pqi_sas_transport_template)
8741 pqi_process_module_params();
8743 rc = pci_register_driver(&pqi_pci_driver);
8745 sas_release_transport(pqi_sas_transport_template);
8750 static void __exit pqi_cleanup(void)
8752 pci_unregister_driver(&pqi_pci_driver);
8753 sas_release_transport(pqi_sas_transport_template);
8756 module_init(pqi_init);
8757 module_exit(pqi_cleanup);
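/*
 * Compile-time layout checks: BUILD_BUG_ON() fails the build if any of
 * these structure offsets drifts from the layout that the PQI
 * specification and controller firmware expect.
 */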
8759 static void __attribute__((unused)) verify_structures(void)
8761 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8762 sis_host_to_ctrl_doorbell) != 0x20);
8763 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8764 sis_interrupt_mask) != 0x34);
8765 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8766 sis_ctrl_to_host_doorbell) != 0x9c);
8767 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8768 sis_ctrl_to_host_doorbell_clear) != 0xa0);
8769 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8770 sis_driver_scratch) != 0xb0);
8771 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8772 sis_firmware_status) != 0xbc);
8773 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8774 sis_mailbox) != 0x1000);
8775 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8776 pqi_registers) != 0x4000);
8778 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8780 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8782 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8783 response_queue_id) != 0x4);
8784 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8786 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
8788 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8790 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8791 service_response) != 0x1);
8792 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8793 data_present) != 0x2);
8794 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8796 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8797 residual_count) != 0x4);
8798 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8799 data_length) != 0x8);
8800 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8802 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8804 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
8806 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8807 data_in_result) != 0x0);
8808 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8809 data_out_result) != 0x1);
8810 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8812 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8814 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8815 status_qualifier) != 0x6);
8816 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8817 sense_data_length) != 0x8);
8818 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8819 response_data_length) != 0xa);
8820 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8821 data_in_transferred) != 0xc);
8822 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8823 data_out_transferred) != 0x10);
8824 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8826 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
8828 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8830 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8831 function_and_status_code) != 0x8);
8832 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8833 max_admin_iq_elements) != 0x10);
8834 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8835 max_admin_oq_elements) != 0x11);
8836 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8837 admin_iq_element_length) != 0x12);
8838 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8839 admin_oq_element_length) != 0x13);
8840 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8841 max_reset_timeout) != 0x14);
8842 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8843 legacy_intx_status) != 0x18);
8844 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8845 legacy_intx_mask_set) != 0x1c);
8846 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8847 legacy_intx_mask_clear) != 0x20);
8848 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8849 device_status) != 0x40);
8850 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8851 admin_iq_pi_offset) != 0x48);
8852 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8853 admin_oq_ci_offset) != 0x50);
8854 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8855 admin_iq_element_array_addr) != 0x58);
8856 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8857 admin_oq_element_array_addr) != 0x60);
8858 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8859 admin_iq_ci_addr) != 0x68);
8860 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8861 admin_oq_pi_addr) != 0x70);
8862 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8863 admin_iq_num_elements) != 0x78);
8864 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8865 admin_oq_num_elements) != 0x79);
8866 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8867 admin_queue_int_msg_num) != 0x7a);
8868 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8869 device_error) != 0x80);
8870 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8871 error_details) != 0x88);
8872 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8873 device_reset) != 0x90);
8874 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8875 power_action) != 0x94);
8876 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
8878 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8879 header.iu_type) != 0);
8880 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8881 header.iu_length) != 2);
8882 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8883 header.work_area) != 6);
8884 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8886 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8887 function_code) != 10);
8888 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8889 data.report_device_capability.buffer_length) != 44);
8890 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8891 data.report_device_capability.sg_descriptor) != 48);
8892 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8893 data.create_operational_iq.queue_id) != 12);
8894 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8895 data.create_operational_iq.element_array_addr) != 16);
8896 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8897 data.create_operational_iq.ci_addr) != 24);
8898 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8899 data.create_operational_iq.num_elements) != 32);
8900 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8901 data.create_operational_iq.element_length) != 34);
8902 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8903 data.create_operational_iq.queue_protocol) != 36);
8904 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8905 data.create_operational_oq.queue_id) != 12);
8906 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8907 data.create_operational_oq.element_array_addr) != 16);
8908 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8909 data.create_operational_oq.pi_addr) != 24);
8910 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8911 data.create_operational_oq.num_elements) != 32);
8912 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8913 data.create_operational_oq.element_length) != 34);
8914 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8915 data.create_operational_oq.queue_protocol) != 36);
8916 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8917 data.create_operational_oq.int_msg_num) != 40);
8918 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8919 data.create_operational_oq.coalescing_count) != 42);
8920 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8921 data.create_operational_oq.min_coalescing_time) != 44);
8922 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8923 data.create_operational_oq.max_coalescing_time) != 48);
8924 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8925 data.delete_operational_queue.queue_id) != 12);
8926 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
8927 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8928 data.create_operational_iq) != 64 - 11);
8929 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8930 data.create_operational_oq) != 64 - 11);
8931 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8932 data.delete_operational_queue) != 64 - 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
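
	/* RAID path request IU */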
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
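
	/* AIO path request IU */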
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
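
	/* I/O response IU */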
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);
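
	/* general management request IU */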
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);
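
	/* IU layer descriptor */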
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
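
	/* device capability buffer */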
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
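
	/* event descriptor and event configuration */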
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));
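
	/* event response IU */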
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
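
	/* event acknowledgment request IU */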
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
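
	/* task management request IU */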
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
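
	/* task management response IU */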
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
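
	/* BMIC IDENTIFY CONTROLLER buffer */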
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
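
	/* BMIC IDENTIFY PHYSICAL DEVICE buffer */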
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
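
	/*
	 * Queue element counts must fit in an 8-bit field, and element
	 * lengths must be multiples of the required queue element
	 * alignment.  1048560 is 0xffff * 16, i.e. the largest length
	 * expressible by a 16-bit field counting 16-byte units.
	 */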
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
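
	/* reserved slots must leave room for regular I/O requests */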
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);