// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
#define DRIVER_VERSION		"2.1.14-035"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		14
#define DRIVER_REVISION		35

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}
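/*
 * Note: scsi_cmd_priv() returns the driver-private area that the SCSI
 * midlayer allocates immediately after each command (sized by the host
 * template's cmd_size field), so no separate per-command allocation is
 * needed here.
 */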
static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_msecs);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);
enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};
static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}
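/*
 * Note: these helpers do a read-modify-write of the SIS driver scratch
 * register so each flag can be updated independently. Because the scratch
 * register lives on the controller rather than in host memory, the saved
 * mode and firmware-triage flags can be recovered by a fresh driver
 * instance (e.g. after a kdump/kexec boot) before any PQI data structures
 * exist.
 */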
static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}
static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}
#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}
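/*
 * Note: the driver is considered quiesced once every submitter counted in
 * num_busy_threads is also counted in num_blocked_threads, i.e. all active
 * threads are parked in pqi_wait_if_ctrl_blocked(); that is why the loop
 * compares the two counters rather than waiting for num_busy_threads to
 * reach zero.
 */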
static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}
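/*
 * Note: ofa_mutex itself serves as the "OFA (Online Firmware Activation) in
 * progress" flag: holding it marks OFA active, mutex_is_locked() polls that
 * state, and the lock/unlock pair in pqi_wait_until_ofa_finished() simply
 * blocks the caller until the current activation drops the mutex.
 */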
static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}
static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
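/*
 * Note: this maps one contiguous buffer into exactly one SG descriptor,
 * with CISS_SG_LAST marking the end of the chain; it serves the driver's
 * internal single-buffer requests, which never need a longer SG list.
 */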
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case TEST_UNIT_READY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = TEST_UNIT_READY;
		break;
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}
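/*
 * Note: the IU length above is computed as offsetof(..., sg_descriptors[1])
 * minus the common header length, i.e. the fixed part of the request plus
 * exactly one SG descriptor - which matches the single descriptor that
 * pqi_map_single() fills in at the end.
 */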
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
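/*
 * Note: a pool slot is claimed by winning the 0 -> 1 refcount transition,
 * so no lock is needed; the "benignly racy" next_io_request_slot is only a
 * starting hint for the search, and a stale value costs extra loop
 * iterations rather than correctness.
 */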
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}
/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}
static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;	/* no limit */
	else
		bytes *= 1024;	/* limit is reported in 1 KiB units */

	return bytes;
}
#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
	sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
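/*
 * Note: both MINIMUM_* lengths end at max_write_raid_1_10_3drive, the last
 * subpage field pqi_get_advanced_raid_bypass_config() consumes, so firmware
 * that returns a shorter (older) subpage is rejected before any of those
 * fields are read.
 */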
static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}
int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}
#define PQI_FETCH_PTRAID_DATA	(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}
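/*
 * Note: the diag options are updated read-modify-write (sense, OR in
 * PQI_FETCH_PTRAID_DATA, set) so that whatever option bits the firmware
 * already has enabled are preserved.
 */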
static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}
#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
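/*
 * Note: the wellness time is BCD-encoded per byte, e.g. 14:35:09 on
 * 2021-06-30 becomes time[] = { 0x14, 0x35, 0x09, 0x00, 0x06, 0x30,
 * 0x20, 0x21 }: hour, minute, second, a zero pad byte, month, day, then
 * the century and year split across the last two bytes.
 */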
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
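/*
 * Note: the LUN list can grow between the initial header-only probe and the
 * full read (e.g. a hotplug during the scan), so if the firmware reports a
 * larger list_length on the full read, the buffer is reallocated and the
 * request retried until the returned length fits.
 */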
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals;
	size_t rpl_16byte_wwid_list_length;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			kfree(rpl_list);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
	rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));

	rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
	if (!rpl_16byte_wwid_list) {
		kfree(rpl_8byte_wwid_list);
		return -ENOMEM;
	}

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}
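/*
 * Note: when the firmware can only return the older 8-byte-WWID RPL format,
 * the list is converted above into the 16-byte-WWID layout (upper eight
 * WWID bytes zeroed) so that the rest of the driver only ever has to parse
 * one format.
 */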
static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}
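/*
 * Note: the zeroed entry appended above represents the controller LUN
 * itself: RAID_CTLR_LUNID is the all-zero SCSI3 address, so the zero-filled
 * report_log_lun slot is later matched by pqi_is_hba_lunid() and exposed as
 * the HBA device.
 */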
static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	int bus;
	int target;
	int lun;
	u32 lunid;
	u8 *scsi3addr;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {
		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}
static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */
static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}
#define PQI_DEVICE_NCQ_PRIO_SUPPORTED	0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
		id_phys->phy_count)
		device->phy_id =
			id_phys->phy_to_phy_map[device->active_path_index];
	else
		device->phy_id = 0xFF;

	device->ncq_prio_support =
		((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
		PQI_DEVICE_NCQ_PRIO_SUPPORTED);

	return 0;
}
static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}
/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 * using the management utility.
 */
static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 scsi_status;
	enum dma_data_direction dir;
	u8 *buffer;
	int buffer_length = 64;
	size_t sense_data_length;
	struct scsi_sense_hdr sshdr;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info error_info;
	bool offline = false; /* Assume keep online */

	/* Do not check controllers. */
	if (pqi_is_hba_lunid(device->scsi3addr))
		return false;

	/* Do not check LVs. */
	if (pqi_is_logical_device(device))
		return false;

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return false; /* Assume not offline */

	/* Check for SANITIZE in progress using TUR */
	rc = pqi_build_raid_path_request(ctrl_info, &request,
		TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		goto out; /* Assume not offline */

	memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);

	if (rc)
		goto out; /* Assume not offline */

	scsi_status = error_info.status;
	sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info.response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info.data))
			sense_data_length = sizeof(error_info.data);

		/*
		 * Check for sanitize in progress: asc:0x04, ascq: 0x1b
		 */
		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info.data,
				sense_data_length, &sshdr) &&
			sshdr.sense_key == NOT_READY &&
			sshdr.asc == 0x04 &&
			sshdr.ascq == 0x1b) {
			device->device_offline = true;
			offline = true;
			goto out; /* Keep device offline */
		}
	}

out:
	kfree(buffer);
	return offline;
}
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}
static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}
static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}
#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);

	pqi_device_remove_start(device);
}
/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}
static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;

	return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};
static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}
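/*
 * Note: the lookup above matches on the SCSI3 address (the slot), then uses
 * pqi_device_equal() as the identity check (WWID for physical devices,
 * volume ID for logical ones); a slot match with an identity mismatch is
 * reported as DEVICE_CHANGED, i.e. a different device now answers at the
 * same address.
 */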
static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP    ";

	return scsi_device_type(device->devtype);
}
#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx%016llx",
			get_unaligned_be64(&device->wwid[0]),
			get_unaligned_be64(&device->wwid[8]));

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	if (pqi_is_logical_device(existing_device) &&
		ctrl_info->logical_volume_rescan_needed)
		existing_device->rescan = true;

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->phy_id = new_device->phy_id;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}
/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */
static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}
static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock. That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(ctrl_info, matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/*
	 * If OFA is in progress and there are devices that need to be deleted,
	 * allow any pending reset operations to continue and unblock any SCSI
	 * requests before removal.
	 */
	if (pqi_ofa_in_progress(ctrl_info)) {
		list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
			if (pqi_is_device_added(device))
				pqi_device_remove_start(device);
		pqi_ctrl_unblock_device_reset(ctrl_info);
		pqi_scsi_unblock_requests(ctrl_info);
	}

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SML of any existing device changes, such as queue depth
	 * and device size.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
			if (device->rescan) {
				scsi_rescan_device(&device->sdev->sdev_gendev);
				device->rescan = false;
			}
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc == 0) {
				pqi_dev_info(ctrl_info, "added", device);
			} else {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}

	ctrl_info->logical_volume_rescan_needed = false;
}
static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	/*
	 * Only support the HBA controller itself as a RAID
	 * controller. If it's a RAID controller other than
	 * the HBA itself (an external RAID controller, for
	 * example), we don't support it.
	 */
	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
		!pqi_is_hba_lunid(device->scsi3addr))
		return false;

	return true;
}
static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}
static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}
static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
{
	if (pqi_is_logical_device(device))
		return false;

	return (device->path_map & (device->path_map - 1)) != 0;
}
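/*
 * Note: path_map is a bitmask with one bit per active path, so the
 * expression above is the classic "more than one bit set" test:
 * clearing the lowest set bit and checking whether anything remains.
 * Illustrative values only: path_map = 0x3 -> 0x3 & 0x2 != 0 (multipath);
 * path_map = 0x4 -> 0x4 & 0x3 == 0 (single path).
 */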
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
}
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
	struct report_log_lun_list *logdev_list = NULL;
	struct report_phys_lun_16byte_wwid *phys_lun;
	struct report_log_lun *log_lun;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	unsigned int physical_index;
	unsigned int logical_index;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below. We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}

		if (pqi_hide_vsep) {
			for (i = num_physicals - 1; i >= 0; i--) {
				phys_lun = &physdev_list->lun_entries[i];
				if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
					pqi_mask_device(phys_lun->lunid);
					break;
				}
			}
		}
	}

	if (num_logicals &&
		(logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
		ctrl_info->lv_drive_type_mix_valid = true;

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc_array(num_new_devices,
					sizeof(*new_device_list),
					GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;
	physical_index = 0;
	logical_index = 0;

	for (i = 0; i < num_new_devices; i++) {

		if ((!pqi_expose_ld_first && i < num_physicals) ||
			(pqi_expose_ld_first && i >= num_logicals)) {
			is_physical_device = true;
			phys_lun = &physdev_list->lun_entries[physical_index++];
			log_lun = NULL;
			scsi3addr = phys_lun->lunid;
		} else {
			is_physical_device = false;
			phys_lun = NULL;
			log_lun = &logdev_list->lun_entries[logical_index++];
			scsi3addr = log_lun->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (is_physical_device) {
			device->device_type = phys_lun->device_type;
			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
				device->is_expander_smp_device = true;
		} else {
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);
		}

		if (!pqi_is_supported_device(device))
			continue;

		/* Do not present disks that the OS cannot fully probe. */
		if (pqi_keep_device_offline(ctrl_info, device))
			continue;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device, id_phys);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx%016llx\n",
					get_unaligned_be64(&phys_lun->wwid[0]),
					get_unaligned_be64(&phys_lun->wwid[8]));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
			if ((phys_lun->device_flags &
				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
				phys_lun->aio_handle) {
				device->aio_enabled = true;
				device->aio_handle =
					phys_lun->aio_handle;
			}
		} else {
			memcpy(device->volume_id, log_lun->volume_id,
				sizeof(device->volume_id));
		}

		device->sas_address = get_unaligned_be64(&device->wwid[0]);

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}
static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;

	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
		list_del(&device->scsi_device_list_entry);
		pqi_free_device(device);
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
	}
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	int mutex_acquired;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);

	if (!mutex_acquired) {
		if (pqi_ctrl_scan_blocked(ctrl_info))
			return -EBUSY;
		pqi_schedule_rescan_worker_delayed(ctrl_info);
		return -EINPROGRESS;
	}

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
		pqi_schedule_rescan_worker_delayed(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}
static void pqi_scan_start(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(shost);

	pqi_scan_scsi_devices(ctrl_info);
}
/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}
static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
	struct raid_map *raid_map, u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
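/*
 * Worked example for the tweak math above (illustrative values only):
 * with a 4096-byte volume block size, LBA 100 becomes a tweak of
 * (100 * 4096) / 512 = 800, i.e. the equivalent 512-byte-sector LBA,
 * split into encrypt_tweak_lower/upper as the low and high 32 bits.
 */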
/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */
static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	bool is_supported = true;

	switch (rmd->raid_level) {
	case SA_RAID_0:
		break;
	case SA_RAID_1:
		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
			rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
			is_supported = false;
		break;
	case SA_RAID_TRIPLE:
		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
			rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
			is_supported = false;
		break;
	case SA_RAID_5:
		if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
			rmd->data_length > ctrl_info->max_write_raid_5_6))
			is_supported = false;
		break;
	case SA_RAID_6:
		if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
			rmd->data_length > ctrl_info->max_write_raid_5_6))
			is_supported = false;
		break;
	default:
		is_supported = false;
		break;
	}

	return is_supported;
}
#define PQI_RAID_BYPASS_INELIGIBLE 1

static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	/* Check for valid opcode, get LBA and block count. */
	switch (scmd->cmnd[0]) {
	case WRITE_6:
		rmd->is_write = true;
		fallthrough;
	case READ_6:
		rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
		rmd->block_cnt = (u32)scmd->cmnd[4];
		if (rmd->block_cnt == 0)
			rmd->block_cnt = 256;
		break;
	case WRITE_10:
		rmd->is_write = true;
		fallthrough;
	case READ_10:
		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case WRITE_12:
		rmd->is_write = true;
		fallthrough;
	case READ_12:
		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	case WRITE_16:
		rmd->is_write = true;
		fallthrough;
	case READ_16:
		rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_RAID_BYPASS_INELIGIBLE;
	}

	put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);

	return 0;
}
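/*
 * Note on the 6-byte CDB decode above: READ(6)/WRITE(6) carry a 21-bit
 * LBA (the low 5 bits of byte 1 plus bytes 2-3) and an 8-bit transfer
 * length in byte 4, where 0 means 256 blocks - hence the rewrite of
 * block_cnt from 0 to 256. The 10/12/16-byte forms carry full-width
 * big-endian LBA and length fields instead.
 */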
static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
{
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	rmd->last_block = rmd->first_block + rmd->block_cnt - 1;

	/* Check for invalid block or wraparound. */
	if (rmd->last_block >=
		get_unaligned_le64(&raid_map->volume_blk_cnt) ||
		rmd->last_block < rmd->first_block)
		return PQI_RAID_BYPASS_INELIGIBLE;

	rmd->data_disks_per_row =
		get_unaligned_le16(&raid_map->data_disks_per_row);
	rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
	rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);

	/* Calculate stripe information for the request. */
	rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
		return PQI_RAID_BYPASS_INELIGIBLE;
#if BITS_PER_LONG == 32
	tmpdiv = rmd->first_block;
	do_div(tmpdiv, rmd->blocks_per_row);
	rmd->first_row = tmpdiv;
	tmpdiv = rmd->last_block;
	do_div(tmpdiv, rmd->blocks_per_row);
	rmd->last_row = tmpdiv;
	rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
	rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
	tmpdiv = rmd->first_row_offset;
	do_div(tmpdiv, rmd->strip_size);
	rmd->first_column = tmpdiv;
	tmpdiv = rmd->last_row_offset;
	do_div(tmpdiv, rmd->strip_size);
	rmd->last_column = tmpdiv;
#else
	rmd->first_row = rmd->first_block / rmd->blocks_per_row;
	rmd->last_row = rmd->last_block / rmd->blocks_per_row;
	rmd->first_row_offset = (u32)(rmd->first_block -
		(rmd->first_row * rmd->blocks_per_row));
	rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
		rmd->blocks_per_row));
	rmd->first_column = rmd->first_row_offset / rmd->strip_size;
	rmd->last_column = rmd->last_row_offset / rmd->strip_size;
#endif

	/* If this isn't a single row/column then give to the controller. */
	if (rmd->first_row != rmd->last_row ||
		rmd->first_column != rmd->last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Proceeding with driver mapping. */
	rmd->total_disks_per_row = rmd->data_disks_per_row +
		get_unaligned_le16(&raid_map->metadata_disks_per_row);
	rmd->map_row = ((u32)(rmd->first_row >>
		raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);
	rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
		rmd->first_column;

	return 0;
}
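/*
 * Worked example for the mapping above (illustrative values only):
 * with strip_size = 128 blocks and data_disks_per_row = 4,
 * blocks_per_row = 512. An I/O starting at first_block = 1000 lands
 * in row 1 (1000 / 512), at row offset 488 (1000 - 512), in column 3
 * (488 / 128); if the I/O crosses a row or column boundary, the
 * request is handed back to the controller instead.
 */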
static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
	struct raid_map *raid_map)
{
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* RAID 50/60 */
	/* Verify first and last block are in same RAID group. */
	rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
#if BITS_PER_LONG == 32
	tmpdiv = rmd->first_block;
	rmd->first_group = do_div(tmpdiv, rmd->stripesize);
	tmpdiv = rmd->first_group;
	do_div(tmpdiv, rmd->blocks_per_row);
	rmd->first_group = tmpdiv;
	tmpdiv = rmd->last_block;
	rmd->last_group = do_div(tmpdiv, rmd->stripesize);
	tmpdiv = rmd->last_group;
	do_div(tmpdiv, rmd->blocks_per_row);
	rmd->last_group = tmpdiv;
#else
	rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
	rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
#endif
	if (rmd->first_group != rmd->last_group)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Verify request is in a single row of RAID 5/6. */
#if BITS_PER_LONG == 32
	tmpdiv = rmd->first_block;
	do_div(tmpdiv, rmd->stripesize);
	rmd->first_row = tmpdiv;
	rmd->r5or6_first_row = tmpdiv;
	tmpdiv = rmd->last_block;
	do_div(tmpdiv, rmd->stripesize);
	rmd->r5or6_last_row = tmpdiv;
#else
	rmd->first_row = rmd->r5or6_first_row =
		rmd->first_block / rmd->stripesize;
	rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
#endif
	if (rmd->r5or6_first_row != rmd->r5or6_last_row)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Verify request is in a single column. */
#if BITS_PER_LONG == 32
	tmpdiv = rmd->first_block;
	rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
	tmpdiv = rmd->first_row_offset;
	rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
	rmd->r5or6_first_row_offset = rmd->first_row_offset;
	tmpdiv = rmd->last_block;
	rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
	tmpdiv = rmd->r5or6_last_row_offset;
	rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
	tmpdiv = rmd->r5or6_first_row_offset;
	do_div(tmpdiv, rmd->strip_size);
	rmd->first_column = rmd->r5or6_first_column = tmpdiv;
	tmpdiv = rmd->r5or6_last_row_offset;
	do_div(tmpdiv, rmd->strip_size);
	rmd->r5or6_last_column = tmpdiv;
#else
	rmd->first_row_offset = rmd->r5or6_first_row_offset =
		(u32)((rmd->first_block % rmd->stripesize) %
		rmd->blocks_per_row);

	rmd->r5or6_last_row_offset =
		(u32)((rmd->last_block % rmd->stripesize) %
		rmd->blocks_per_row);

	rmd->first_column =
		rmd->r5or6_first_row_offset / rmd->strip_size;
	rmd->r5or6_first_column = rmd->first_column;
	rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
#endif
	if (rmd->r5or6_first_column != rmd->r5or6_last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Request is eligible. */
	rmd->map_row =
		((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);

	rmd->map_index = (rmd->first_group *
		(get_unaligned_le16(&raid_map->row_cnt) *
		rmd->total_disks_per_row)) +
		(rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;

	if (rmd->is_write) {
		u32 index;

		/*
		 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
		 * parity entries inside the device's raid_map.
		 *
		 * A device's RAID map is bounded by: number of RAID disks squared.
		 *
		 * The device's RAID map size is checked during device
		 * initialization.
		 */
		index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
		index *= rmd->total_disks_per_row;
		index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);

		rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
		if (rmd->raid_level == SA_RAID_6) {
			rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
			rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
		}
#if BITS_PER_LONG == 32
		tmpdiv = rmd->first_block;
		do_div(tmpdiv, rmd->blocks_per_row);
		rmd->row = tmpdiv;
#else
		rmd->row = rmd->first_block / rmd->blocks_per_row;
#endif
	}

	return 0;
}
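/*
 * Worked example for the RAID 50/60 grouping math above (illustrative
 * values only): with blocks_per_row = 512 and layout_map_count = 2,
 * stripesize = 1024. first_block = 1500 has stripe offset 476
 * (1500 % 1024) and therefore group 0 (476 / 512); first_block = 600
 * would fall in group 1 (600 / 512). Requests spanning two groups,
 * rows, or columns are bounced back to the controller.
 */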
static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
{
	/* Build the new CDB for the physical disk I/O. */
	if (rmd->disk_block > 0xffffffff) {
		rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
		rmd->cdb[1] = 0;
		put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
		put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
		rmd->cdb[14] = 0;
		rmd->cdb[15] = 0;
		rmd->cdb_length = 16;
	} else {
		rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
		rmd->cdb[1] = 0;
		put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
		rmd->cdb[6] = 0;
		put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
		rmd->cdb[9] = 0;
		rmd->cdb_length = 10;
	}
}
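/*
 * Note: READ(10)/WRITE(10) can address only a 32-bit LBA with a 16-bit
 * transfer length, so any disk_block above 0xffffffff must use the
 * 16-byte variants, which carry a 64-bit LBA and 32-bit length.
 */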
static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	u32 index;
	u32 group;

	group = rmd->map_index / rmd->data_disks_per_row;

	index = rmd->map_index - (group * rmd->data_disks_per_row);
	rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
	index += rmd->data_disks_per_row;
	rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
	if (rmd->layout_map_count > 2) {
		index += rmd->data_disks_per_row;
		rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
	}

	rmd->num_it_nexus_entries = rmd->layout_map_count;
}
static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	struct raid_map *raid_map;
	u32 group;
	u32 next_bypass_group;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
	struct pqi_scsi_dev_raid_map_data rmd = { 0 };

	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
	if (rc)
		return PQI_RAID_BYPASS_INELIGIBLE;

	rmd.raid_level = device->raid_level;

	if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (unlikely(rmd.block_cnt == 0))
		return PQI_RAID_BYPASS_INELIGIBLE;

	raid_map = device->raid_map;

	rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
	if (rc)
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (device->raid_level == SA_RAID_1 ||
		device->raid_level == SA_RAID_TRIPLE) {
		if (rmd.is_write) {
			pqi_calc_aio_r1_nexus(raid_map, &rmd);
		} else {
			group = device->next_bypass_group[rmd.map_index];
			next_bypass_group = group + 1;
			if (next_bypass_group >= rmd.layout_map_count)
				next_bypass_group = 0;
			device->next_bypass_group[rmd.map_index] = next_bypass_group;
			rmd.map_index += group * rmd.data_disks_per_row;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		(rmd.layout_map_count > 1 || rmd.is_write)) {
		rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
		if (rc)
			return PQI_RAID_BYPASS_INELIGIBLE;
	}

	if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
		return PQI_RAID_BYPASS_INELIGIBLE;

	rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
	rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
		rmd.first_row * rmd.strip_size +
		(rmd.first_row_offset - rmd.first_column * rmd.strip_size);
	rmd.disk_block_cnt = rmd.block_cnt;

	/* Handle differing logical/physical block sizes. */
	if (raid_map->phys_blk_shift) {
		rmd.disk_block <<= raid_map->phys_blk_shift;
		rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
	}

	if (unlikely(rmd.disk_block_cnt > 0xffff))
		return PQI_RAID_BYPASS_INELIGIBLE;

	pqi_set_aio_cdb(&rmd);

	if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
		if (rmd.data_length > device->max_transfer_encrypted)
			return PQI_RAID_BYPASS_INELIGIBLE;
		pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
		encryption_info_ptr = &encryption_info;
	} else {
		encryption_info_ptr = NULL;
	}

	if (rmd.is_write) {
		switch (device->raid_level) {
		case SA_RAID_1:
		case SA_RAID_TRIPLE:
			return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
				encryption_info_ptr, device, &rmd);
		case SA_RAID_5:
		case SA_RAID_6:
			return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
				encryption_info_ptr, device, &rmd);
		}
	}

	return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
		rmd.cdb, rmd.cdb_length, queue_group,
		encryption_info_ptr, true, false);
}
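/*
 * Summary of the bypass path above: decode the CDB, reject unsupported
 * RAID levels and out-of-bounds requests, translate the volume-relative
 * LBA to a physical disk and block, rebuild the CDB for that disk,
 * attach encryption tweaks when the map requires them, then submit
 * directly via AIO (writes to RAID 1/5/6 use their dedicated paths).
 */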
#define PQI_STATUS_IDLE 0x0

#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
#define PQI_DELETE_ADMIN_QUEUE_PAIR 2

#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
#define PQI_DEVICE_STATE_ERROR 0x4

#define PQI_MODE_READY_TIMEOUT_SECS 30
#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	u64 signature;
	u8 status;

	pqi_registers = ctrl_info->pqi_registers;
	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		signature = readq(&pqi_registers->signature);
		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
			sizeof(signature)) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI signature\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI IDLE\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		if (readl(&pqi_registers->device_status) ==
			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI all registers ready\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}
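/*
 * Note: the three polling loops above share a single 30-second deadline
 * computed up front; the device is only considered to be in PQI mode
 * once the signature, the IDLE function/status code, and the
 * ALL_REGISTERS_READY device state have all been observed.
 */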
static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
{
	struct pqi_scsi_dev *device;

	device = io_request->scmd->device->hostdata;
	device->raid_bypass_enabled = false;
	device->aio_enabled = false;
}
static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	if (device->device_offline)
		return;

	device->device_offline = true;
	ctrl_info = shost_to_hba(sdev->host);
	pqi_schedule_rescan_worker(ctrl_info);
	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
		path, ctrl_info->scsi_host->host_no, device->bus,
		device->target, device->lun);
}
static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_raid_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	struct scsi_sense_hdr sshdr;

	scmd = io_request->scmd;
	if (!scmd)
		return;

	error_info = io_request->error_info;
	scsi_status = error_info->status;
	host_byte = DID_OK;

	switch (error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		xfer_count =
			get_unaligned_le32(&error_info->data_out_transferred);
		residual_count = scsi_bufflen(scmd) - xfer_count;
		scsi_set_resid(scmd, residual_count);
		if (xfer_count < scmd->underflow)
			host_byte = DID_SOFT_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
	case PQI_DATA_IN_OUT_ABORTED:
		host_byte = DID_ABORT;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		host_byte = DID_TIME_OUT;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
	default:
		host_byte = DID_ERROR;
		break;
	}

	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info->response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info->data))
			sense_data_length = sizeof(error_info->data);

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info->data,
				sense_data_length, &sshdr) &&
				sshdr.sense_key == HARDWARE_ERROR &&
				sshdr.asc == 0x3e) {
			struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
			struct pqi_scsi_dev *device = scmd->device->hostdata;

			switch (sshdr.ascq) {
			case 0x1: /* LOGICAL UNIT FAILURE */
				if (printk_ratelimit())
					scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
						ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
				pqi_take_device_offline(scmd->device, "RAID");
				host_byte = DID_NO_CONNECT;
				break;

			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
				if (printk_ratelimit())
					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
				break;
			}
		}

		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
			sense_data_length = SCSI_SENSE_BUFFERSIZE;
		memcpy(scmd->sense_buffer, error_info->data,
			sense_data_length);
	}

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_aio_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	bool device_offline;
	struct pqi_scsi_dev *device;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	host_byte = DID_OK;
	sense_data_length = 0;
	device_offline = false;
	device = scmd->device->hostdata;

	switch (error_info->service_response) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		scsi_status = error_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (error_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			scsi_status = SAM_STAT_TASK_ABORTED;
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			scsi_status = SAM_STAT_GOOD;
			residual_count = get_unaligned_le32(
				&error_info->residual_count);
			scsi_set_resid(scmd, residual_count);
			xfer_count = scsi_bufflen(scmd) - residual_count;
			if (xfer_count < scmd->underflow)
				host_byte = DID_SOFT_ERROR;
			break;
		case PQI_AIO_STATUS_OVERRUN:
			scsi_status = SAM_STAT_GOOD;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			if (pqi_is_multipath_device(device)) {
				pqi_device_remove_start(device);
				host_byte = DID_NO_CONNECT;
				scsi_status = SAM_STAT_CHECK_CONDITION;
			} else {
				scsi_status = SAM_STAT_GOOD;
				io_request->status = -EAGAIN;
			}
			break;
		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
			if (!io_request->raid_bypass) {
				device_offline = true;
				pqi_take_device_offline(scmd->device, "AIO");
				host_byte = DID_NO_CONNECT;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		default:
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		scsi_status = SAM_STAT_GOOD;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
	default:
		scsi_status = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (error_info->data_present) {
		sense_data_length =
			get_unaligned_le16(&error_info->data_length);
		if (sense_data_length) {
			if (sense_data_length > sizeof(error_info->data))
				sense_data_length = sizeof(error_info->data);
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
				sense_data_length = SCSI_SENSE_BUFFERSIZE;
			memcpy(scmd->sense_buffer, error_info->data,
				sense_data_length);
		}
	}

	if (device_offline && sense_data_length == 0)
		scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
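/*
 * Note: the -EAGAIN status set above for a disabled AIO path on a
 * non-multipath device does not fail the command; the completion
 * handling elsewhere in this driver retries the request, which then
 * travels down the RAID path because raid_bypass_enabled was cleared.
 */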
static void pqi_process_io_error(unsigned int iu_type,
	struct pqi_io_request *io_request)
{
	switch (iu_type) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		pqi_process_raid_io_error(io_request);
		break;
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
		pqi_process_aio_io_error(io_request);
		break;
	}
}
static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_task_management_response *response)
{
	int rc;

	switch (response->response_code) {
	case SOP_TMF_COMPLETE:
	case SOP_TMF_FUNCTION_SUCCEEDED:
		rc = 0;
		break;
	case SOP_TMF_REJECTED:
		rc = -EAGAIN;
		break;
	default:
		rc = -EIO;
		break;
	}

	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"Task Management Function error: %d (response code: %u)\n", rc, response->response_code);

	return rc;
}
static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
	pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
}
static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
{
	int num_responses;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_io_request *io_request;
	struct pqi_io_response *response;
	u16 request_id;

	num_responses = 0;
	oq_ci = queue_group->oq_ci_copy;

	while (1) {
		oq_pi = readl(queue_group->oq_pi);
		if (oq_pi >= ctrl_info->num_elements_per_oq) {
			pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
			dev_err(&ctrl_info->pci_dev->dev,
				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
			return -1;
		}
		if (oq_pi == oq_ci)
			break;

		num_responses++;
		response = queue_group->oq_element_array +
			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

		request_id = get_unaligned_le16(&response->request_id);
		if (request_id >= ctrl_info->max_io_slots) {
			pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
			dev_err(&ctrl_info->pci_dev->dev,
				"request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
			return -1;
		}

		io_request = &ctrl_info->io_request_pool[request_id];
		if (atomic_read(&io_request->refcount) == 0) {
			pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
			dev_err(&ctrl_info->pci_dev->dev,
				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
				request_id, oq_pi, oq_ci);
			return -1;
		}

		switch (response->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
			if (io_request->scmd)
				io_request->scmd->result = 0;
			fallthrough;
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			break;
		case PQI_RESPONSE_IU_VENDOR_GENERAL:
			io_request->status =
				get_unaligned_le16(
				&((struct pqi_vendor_general_response *)response)->status);
			break;
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io_request->status = pqi_interpret_task_management_response(ctrl_info,
				(void *)response);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			io_request->status = -EAGAIN;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io_request->error_info = ctrl_info->error_buffer +
				(get_unaligned_le16(&response->error_index) *
				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
			pqi_process_io_error(response->header.iu_type, io_request);
			break;
		default:
			pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
			dev_err(&ctrl_info->pci_dev->dev,
				"unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
				response->header.iu_type, oq_pi, oq_ci);
			return -1;
		}

		io_request->io_complete_callback(io_request, io_request->context);

		/*
		 * Note that the I/O request structure CANNOT BE TOUCHED after
		 * returning from the I/O completion callback!
		 */
		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		writel(oq_ci, queue_group->oq_ci);
	}

	return num_responses;
}
static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	unsigned int num_elements_used;

	if (pi >= ci)
		num_elements_used = pi - ci;
	else
		num_elements_used = elements_in_queue - ci + pi;

	return elements_in_queue - num_elements_used - 1;
}
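/*
 * Worked example (illustrative values only): with elements_in_queue = 8,
 * pi = 2 and ci = 6, the queue has wrapped, so num_elements_used =
 * 8 - 6 + 2 = 4 and 8 - 4 - 1 = 3 elements are free. One slot is always
 * sacrificed so that pi == ci unambiguously means "empty".
 */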
static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	unsigned long flags;
	void *next_element;
	struct pqi_queue_group *queue_group;

	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);

	while (1) {
		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);

		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
		iq_ci = readl(queue_group->iq_ci[RAID_PATH]);

		if (pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		spin_unlock_irqrestore(
			&queue_group->submit_lock[RAID_PATH], flags);

		if (pqi_ctrl_offline(ctrl_info))
			return;
	}

	next_element = queue_group->iq_element_array[RAID_PATH] +
		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	memcpy(next_element, iu, iu_length);

	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);

	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
}
static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	struct pqi_event_acknowledge_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	request.event_type = event->event_type;
	put_unaligned_le16(event->event_id, &request.event_id);
	put_unaligned_le32(event->additional_event_id, &request.additional_event_id);

	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1

static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
	struct pqi_ctrl_info *ctrl_info)
{
	u8 status;
	unsigned long timeout;

	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		status = pqi_read_soft_reset_status(ctrl_info);
		if (status & PQI_SOFT_RESET_INITIATE)
			return RESET_INITIATE_DRIVER;

		if (status & PQI_SOFT_RESET_ABORT)
			return RESET_ABORT;

		if (!sis_is_firmware_running(ctrl_info))
			return RESET_NORESPONSE;

		if (time_after(jiffies, timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"timed out waiting for soft reset status\n");
			return RESET_TIMEDOUT;
		}

		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
	}
}
static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int delay_secs;
	enum pqi_soft_reset_status reset_status;

	if (ctrl_info->soft_reset_handshake_supported)
		reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
	else
		reset_status = RESET_INITIATE_FIRMWARE;

	delay_secs = PQI_POST_RESET_DELAY_SECS;

	switch (reset_status) {
	case RESET_TIMEDOUT:
		delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
		fallthrough;
	case RESET_INITIATE_DRIVER:
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation: resetting controller\n");
		sis_soft_reset(ctrl_info);
		fallthrough;
	case RESET_INITIATE_FIRMWARE:
		ctrl_info->pqi_mode_enabled = false;
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
		rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
		pqi_ofa_free_host_buffer(ctrl_info);
		pqi_ctrl_ofa_done(ctrl_info);
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation: %s\n",
			rc == 0 ? "SUCCESS" : "FAILED");
		break;
	case RESET_ABORT:
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation ABORTED\n");
		if (ctrl_info->soft_reset_handshake_supported)
			pqi_clear_soft_reset_status(ctrl_info);
		pqi_ofa_free_host_buffer(ctrl_info);
		pqi_ctrl_ofa_done(ctrl_info);
		pqi_ofa_ctrl_unquiesce(ctrl_info);
		break;
	case RESET_NORESPONSE:
		fallthrough;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unexpected Online Firmware Activation reset status: 0x%x\n",
			reset_status);
		pqi_ofa_free_host_buffer(ctrl_info);
		pqi_ctrl_ofa_done(ctrl_info);
		pqi_ofa_ctrl_unquiesce(ctrl_info);
		pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
		break;
	}
}
static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);

	pqi_ctrl_ofa_start(ctrl_info);
	pqi_ofa_setup_host_buffer(ctrl_info);
	pqi_ofa_host_memory_update(ctrl_info);
}
static void pqi_ofa_quiesce_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *event;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);

	event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];

	pqi_ofa_ctrl_quiesce(ctrl_info);
	pqi_acknowledge_event(ctrl_info, event);
	pqi_process_soft_reset(ctrl_info);
}
static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	bool ack_event;

	ack_event = true;

	switch (event->event_id) {
	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
		dev_info(&ctrl_info->pci_dev->dev,
			"received Online Firmware Activation memory allocation request\n");
		schedule_work(&ctrl_info->ofa_memory_alloc_work);
		break;
	case PQI_EVENT_OFA_QUIESCE:
		dev_info(&ctrl_info->pci_dev->dev,
			"received Online Firmware Activation quiesce request\n");
		schedule_work(&ctrl_info->ofa_quiesce_work);
		ack_event = false;
		break;
	case PQI_EVENT_OFA_CANCELED:
		dev_info(&ctrl_info->pci_dev->dev,
			"received Online Firmware Activation cancel request: reason: %u\n",
			ctrl_info->ofa_cancel_reason);
		pqi_ofa_free_host_buffer(ctrl_info);
		pqi_ctrl_ofa_done(ctrl_info);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"received unknown Online Firmware Activation request: event ID: %u\n",
			event->event_id);
		break;
	}

	return ack_event;
}
static void pqi_event_worker(struct work_struct *work)
{
	unsigned int i;
	bool rescan_needed;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *event;
	bool ack_event;

	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);

	pqi_ctrl_busy(ctrl_info);
	pqi_wait_if_ctrl_blocked(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		goto out;

	rescan_needed = false;
	event = ctrl_info->events;
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (event->pending) {
			event->pending = false;
			if (event->event_type == PQI_EVENT_TYPE_OFA) {
				ack_event = pqi_ofa_process_event(ctrl_info, event);
			} else {
				ack_event = true;
				rescan_needed = true;
				if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
					ctrl_info->logical_volume_rescan_needed = true;
			}
			if (ack_event)
				pqi_acknowledge_event(ctrl_info, event);
		}
		event++;
	}

	if (rescan_needed)
		pqi_schedule_rescan_worker_delayed(ctrl_info);

out:
	pqi_ctrl_unbusy(ctrl_info);
}
#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)

static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
	int num_interrupts;
	u32 heartbeat_count;
	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
			dev_err(&ctrl_info->pci_dev->dev,
				"no heartbeat detected - last heartbeat count: %u\n",
				heartbeat_count);
			pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
			return;
		}
	} else {
		ctrl_info->previous_num_interrupts = num_interrupts;
	}

	ctrl_info->previous_heartbeat_count = heartbeat_count;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}
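/*
 * Note: the handler above only declares the controller dead when both
 * signals stall across one 10-second interval: no new interrupts were
 * counted *and* the firmware's heartbeat counter failed to advance.
 * Either one advancing re-arms the timer for another interval.
 */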
static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return;

	ctrl_info->previous_num_interrupts =
		atomic_read(&ctrl_info->num_interrupts);
	ctrl_info->previous_heartbeat_count =
		pqi_read_heartbeat_counter(ctrl_info);

	ctrl_info->heartbeat_timer.expires =
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
	add_timer(&ctrl_info->heartbeat_timer);
}

static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	del_timer_sync(&ctrl_info->heartbeat_timer);
}
static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event, struct pqi_event_response *response)
{
	switch (event->event_id) {
	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
		ctrl_info->ofa_bytes_requested =
			get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
		break;
	case PQI_EVENT_OFA_CANCELED:
		ctrl_info->ofa_cancel_reason =
			get_unaligned_le16(&response->data.ofa_cancelled.reason);
		break;
	}
}
static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
	int num_events;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_event_queue *event_queue;
	struct pqi_event_response *response;
	struct pqi_event *event;
	int event_index;

	event_queue = &ctrl_info->event_queue;
	num_events = 0;
	oq_ci = event_queue->oq_ci_copy;

	while (1) {
		oq_pi = readl(event_queue->oq_pi);
		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
			pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
			dev_err(&ctrl_info->pci_dev->dev,
				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
			return -1;
		}

		if (oq_pi == oq_ci)
			break;

		num_events++;
		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

		event_index = pqi_event_type_to_event_index(response->event_type);

		if (event_index >= 0 && response->request_acknowledge) {
			event = &ctrl_info->events[event_index];
			event->pending = true;
			event->event_type = response->event_type;
			event->event_id = get_unaligned_le16(&response->event_id);
			event->additional_event_id =
				get_unaligned_le32(&response->additional_event_id);
			if (event->event_type == PQI_EVENT_TYPE_OFA)
				pqi_ofa_capture_event_payload(ctrl_info, event, response);
		}

		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events) {
		event_queue->oq_ci_copy = oq_ci;
		writel(oq_ci, event_queue->oq_ci);
		schedule_work(&ctrl_info->event_work);
	}

	return num_events;
}
#define PQI_LEGACY_INTX_MASK 0x1

static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
{
	u32 intx_mask;
	struct pqi_device_registers __iomem *pqi_registers;
	volatile void __iomem *register_addr;

	pqi_registers = ctrl_info->pqi_registers;

	if (enable_intx)
		register_addr = &pqi_registers->legacy_intx_mask_clear;
	else
		register_addr = &pqi_registers->legacy_intx_mask_set;

	intx_mask = readl(register_addr);
	intx_mask |= PQI_LEGACY_INTX_MASK;
	writel(intx_mask, register_addr);
}
static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_irq_mode new_mode)
{
	switch (ctrl_info->irq_mode) {
	case IRQ_MODE_MSIX:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			break;
		case IRQ_MODE_INTX:
			pqi_configure_legacy_intx(ctrl_info, true);
			sis_enable_intx(ctrl_info);
			break;
		case IRQ_MODE_NONE:
			break;
		}
		break;
	case IRQ_MODE_INTX:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			pqi_configure_legacy_intx(ctrl_info, false);
			sis_enable_msix(ctrl_info);
			break;
		case IRQ_MODE_INTX:
			break;
		case IRQ_MODE_NONE:
			pqi_configure_legacy_intx(ctrl_info, false);
			break;
		}
		break;
	case IRQ_MODE_NONE:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			sis_enable_msix(ctrl_info);
			break;
		case IRQ_MODE_INTX:
			pqi_configure_legacy_intx(ctrl_info, true);
			sis_enable_intx(ctrl_info);
			break;
		case IRQ_MODE_NONE:
			break;
		}
		break;
	}

	ctrl_info->irq_mode = new_mode;
}
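/*
 * Note: only the registers that differ between the current and requested
 * modes are touched above; e.g. MSIX -> INTX unmasks the legacy INTx
 * pin and then switches SIS to INTx mode, while a transition to the
 * same mode is a no-op.
 */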
#define PQI_LEGACY_INTX_PENDING 0x1

static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
{
	bool valid_irq;
	u32 intx_status;

	switch (ctrl_info->irq_mode) {
	case IRQ_MODE_MSIX:
		valid_irq = true;
		break;
	case IRQ_MODE_INTX:
		intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
		if (intx_status & PQI_LEGACY_INTX_PENDING)
			valid_irq = true;
		else
			valid_irq = false;
		break;
	case IRQ_MODE_NONE:
	default:
		valid_irq = false;
		break;
	}

	return valid_irq;
}
static irqreturn_t pqi_irq_handler(int irq, void *data)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;
	int num_io_responses_handled;
	int num_events_handled;

	queue_group = data;
	ctrl_info = queue_group->ctrl_info;

	if (!pqi_is_valid_irq(ctrl_info))
		return IRQ_NONE;

	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
	if (num_io_responses_handled < 0)
		goto out;

	if (irq == ctrl_info->event_irq) {
		num_events_handled = pqi_process_event_intr(ctrl_info);
		if (num_events_handled < 0)
			goto out;
	} else {
		num_events_handled = 0;
	}

	if (num_io_responses_handled + num_events_handled > 0)
		atomic_inc(&ctrl_info->num_interrupts);

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);

out:
	return IRQ_HANDLED;
}
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	struct pci_dev *pci_dev = ctrl_info->pci_dev;
	int i;
	int rc;

	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
		if (rc) {
			dev_err(&pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_irq_vector(pci_dev, i), rc);
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}

static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
			&ctrl_info->queue_groups[i]);

	ctrl_info->num_msix_vectors_initialized = 0;
}
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	int num_vectors_enabled;

	num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (num_vectors_enabled < 0) {
		dev_err(&ctrl_info->pci_dev->dev,
			"MSI-X init failed with error %d\n",
			num_vectors_enabled);
		return num_vectors_enabled;
	}

	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
	ctrl_info->irq_mode = IRQ_MODE_MSIX;

	return 0;
}

static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->num_msix_vectors_enabled) {
		pci_free_irq_vectors(ctrl_info->pci_dev);
		ctrl_info->num_msix_vectors_enabled = 0;
	}
}
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	size_t alloc_length;
	size_t element_array_length_per_iq;
	size_t element_array_length_per_oq;
	void *element_array;
	void __iomem *next_queue_index;
	void *aligned_pointer;
	unsigned int num_inbound_queues;
	unsigned int num_outbound_queues;
	unsigned int num_queue_indexes;
	struct pqi_queue_group *queue_group;

	element_array_length_per_iq =
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_iq;
	element_array_length_per_oq =
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_oq;
	num_inbound_queues = ctrl_info->num_queue_groups * 2;
	num_outbound_queues = ctrl_info->num_queue_groups;
	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;

	aligned_pointer = NULL;

	for (i = 0; i < num_inbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_iq;
	}

	for (i = 0; i < num_outbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_oq;
	}

	aligned_pointer = PTR_ALIGN(aligned_pointer,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	for (i = 0; i < num_queue_indexes; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		aligned_pointer += sizeof(pqi_index_t);
	}

	alloc_length = (size_t)aligned_pointer +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	alloc_length += PQI_EXTRA_SGL_MEMORY;

	ctrl_info->queue_memory_base =
		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
			&ctrl_info->queue_memory_base_dma_handle,
			GFP_KERNEL);

	if (!ctrl_info->queue_memory_base)
		return -ENOMEM;

	ctrl_info->queue_memory_length = alloc_length;

	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_element_array[RAID_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		queue_group->iq_element_array[AIO_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->oq_element_array = element_array;
		queue_group->oq_element_array_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_oq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_element_array = element_array;
	ctrl_info->event_queue.oq_element_array_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(element_array - ctrl_info->queue_memory_base);
	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
		PQI_OPERATIONAL_INDEX_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_ci[RAID_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index -
			(void __iomem *)ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->iq_ci[AIO_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index -
			(void __iomem *)ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->oq_pi = next_queue_index;
		queue_group->oq_pi_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index -
			(void __iomem *)ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_pi = next_queue_index;
	ctrl_info->event_queue.oq_pi_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(next_queue_index -
		(void __iomem *)ctrl_info->queue_memory_base);

	return 0;
}
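/*
 * Note: the function above walks the layout twice with the same
 * alignment rules - once with aligned_pointer starting at NULL purely
 * to compute the total size to allocate, and once more to carve the
 * single DMA-coherent buffer into element arrays and queue indexes.
 */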
static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;

	/*
	 * Initialize the backpointers to the controller structure in
	 * each operational queue group structure.
	 */
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;

	/*
	 * Assign IDs to all operational queues. Note that the IDs
	 * assigned to operational IQs are independent of the IDs
	 * assigned to operational OQs.
	 */
	ctrl_info->event_queue.oq_id = next_oq_id++;
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
	}

	/*
	 * Assign MSI-X table entry indexes to all queues. Note that the
	 * interrupt for the event queue is shared with the first queue group.
	 */
	ctrl_info->event_queue.int_msg_num = 0;
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].int_msg_num = i;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
	}
}
4203 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4205 size_t alloc_length;
4206 struct pqi_admin_queues_aligned *admin_queues_aligned;
4207 struct pqi_admin_queues *admin_queues;
4209 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4210 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4212 ctrl_info->admin_queue_memory_base =
4213 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4214 &ctrl_info->admin_queue_memory_base_dma_handle,
4217 if (!ctrl_info->admin_queue_memory_base)
4220 ctrl_info->admin_queue_memory_length = alloc_length;
4222 admin_queues = &ctrl_info->admin_queues;
4223 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4224 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4225 admin_queues->iq_element_array =
4226 &admin_queues_aligned->iq_element_array;
4227 admin_queues->oq_element_array =
4228 &admin_queues_aligned->oq_element_array;
4229 admin_queues->iq_ci =
4230 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4231 admin_queues->oq_pi =
4232 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4234 admin_queues->iq_element_array_bus_addr =
4235 ctrl_info->admin_queue_memory_base_dma_handle +
4236 (admin_queues->iq_element_array -
4237 ctrl_info->admin_queue_memory_base);
4238 admin_queues->oq_element_array_bus_addr =
4239 ctrl_info->admin_queue_memory_base_dma_handle +
4240 (admin_queues->oq_element_array -
4241 ctrl_info->admin_queue_memory_base);
4242 admin_queues->iq_ci_bus_addr =
4243 ctrl_info->admin_queue_memory_base_dma_handle +
4244 ((void __iomem *)admin_queues->iq_ci -
4245 (void __iomem *)ctrl_info->admin_queue_memory_base);
4246 admin_queues->oq_pi_bus_addr =
4247 ctrl_info->admin_queue_memory_base_dma_handle +
4248 ((void __iomem *)admin_queues->oq_pi -
4249 (void __iomem *)ctrl_info->admin_queue_memory_base);
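/*
 * Illustrative sketch (editor's addition, not driver code) of the alignment
 * pattern used above: over-allocate by the required alignment, then round
 * the CPU pointer up with PTR_ALIGN(). The extra bytes guarantee an aligned
 * object of the requested size still fits inside the allocation; the
 * matching bus address is derived by adding the same offset to the DMA
 * handle, as done above. The helper name is hypothetical.
 */
static void *pqi_example_alloc_aligned(struct device *dev, size_t size,
size_t align, dma_addr_t *dma_handle)
{
void *base;

base = dma_alloc_coherent(dev, size + align, dma_handle, GFP_KERNEL);
if (!base)
return NULL;

/* CPU-side view, rounded up to the requested alignment. */
return PTR_ALIGN(base, align);
}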
4254 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
4255 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4257 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4259 struct pqi_device_registers __iomem *pqi_registers;
4260 struct pqi_admin_queues *admin_queues;
4261 unsigned long timeout;
4265 pqi_registers = ctrl_info->pqi_registers;
4266 admin_queues = &ctrl_info->admin_queues;
4268 writeq((u64)admin_queues->iq_element_array_bus_addr,
4269 &pqi_registers->admin_iq_element_array_addr);
4270 writeq((u64)admin_queues->oq_element_array_bus_addr,
4271 &pqi_registers->admin_oq_element_array_addr);
4272 writeq((u64)admin_queues->iq_ci_bus_addr,
4273 &pqi_registers->admin_iq_ci_addr);
4274 writeq((u64)admin_queues->oq_pi_bus_addr,
4275 &pqi_registers->admin_oq_pi_addr);
4277 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4278 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4279 (admin_queues->int_msg_num << 16);
4280 writel(reg, &pqi_registers->admin_iq_num_elements);
4282 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4283 &pqi_registers->function_and_status_code);
4285 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4286 while (1) {
4287 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4288 status = readb(&pqi_registers->function_and_status_code);
4289 if (status == PQI_STATUS_IDLE)
4290 break;
4291 if (time_after(jiffies, timeout))
4292 return -ETIMEDOUT;
4293 }
4296 * The offset registers are not initialized to the correct
4297 * offsets until *after* the create admin queue pair command
4298 * completes successfully.
4300 admin_queues->iq_pi = ctrl_info->iomem_base +
4301 PQI_DEVICE_REGISTERS_OFFSET +
4302 readq(&pqi_registers->admin_iq_pi_offset);
4303 admin_queues->oq_ci = ctrl_info->iomem_base +
4304 PQI_DEVICE_REGISTERS_OFFSET +
4305 readq(&pqi_registers->admin_oq_ci_offset);
4310 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4311 struct pqi_general_admin_request *request)
4313 struct pqi_admin_queues *admin_queues;
4317 admin_queues = &ctrl_info->admin_queues;
4318 iq_pi = admin_queues->iq_pi_copy;
4320 next_element = admin_queues->iq_element_array +
4321 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4323 memcpy(next_element, request, sizeof(*request));
4325 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4326 admin_queues->iq_pi_copy = iq_pi;
4329 * This write notifies the controller that an IU is available to be
4330 * processed.
4332 writel(iq_pi, admin_queues->iq_pi);
4335 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4337 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4338 struct pqi_general_admin_response *response)
4340 struct pqi_admin_queues *admin_queues;
4343 unsigned long timeout;
4345 admin_queues = &ctrl_info->admin_queues;
4346 oq_ci = admin_queues->oq_ci_copy;
4348 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4350 while (1) {
4351 oq_pi = readl(admin_queues->oq_pi);
4352 if (oq_pi != oq_ci)
4353 break;
4354 if (time_after(jiffies, timeout)) {
4355 dev_err(&ctrl_info->pci_dev->dev,
4356 "timed out waiting for admin response\n");
4357 return -ETIMEDOUT;
4358 }
4359 if (!sis_is_firmware_running(ctrl_info))
4360 return -ENXIO;
4361 usleep_range(1000, 2000);
4362 }
4364 memcpy(response, admin_queues->oq_element_array +
4365 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4367 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4368 admin_queues->oq_ci_copy = oq_ci;
4369 writel(oq_ci, admin_queues->oq_ci);
4374 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4375 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4376 struct pqi_io_request *io_request)
4378 struct pqi_io_request *next;
4383 unsigned long flags;
4384 unsigned int num_elements_needed;
4385 unsigned int num_elements_to_end_of_queue;
4387 struct pqi_iu_header *request;
4389 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4392 io_request->queue_group = queue_group;
4393 list_add_tail(&io_request->request_list_entry,
4394 &queue_group->request_list[path]);
4397 iq_pi = queue_group->iq_pi_copy[path];
4399 list_for_each_entry_safe(io_request, next,
4400 &queue_group->request_list[path], request_list_entry) {
4402 request = io_request->iu;
4404 iu_length = get_unaligned_le16(&request->iu_length) +
4405 PQI_REQUEST_HEADER_LENGTH;
4406 num_elements_needed =
4407 DIV_ROUND_UP(iu_length,
4408 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4410 iq_ci = readl(queue_group->iq_ci[path]);
4412 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4413 ctrl_info->num_elements_per_iq))
4414 break;
4416 put_unaligned_le16(queue_group->oq_id,
4417 &request->response_queue_id);
4419 next_element = queue_group->iq_element_array[path] +
4420 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4422 num_elements_to_end_of_queue =
4423 ctrl_info->num_elements_per_iq - iq_pi;
4425 if (num_elements_needed <= num_elements_to_end_of_queue) {
4426 memcpy(next_element, request, iu_length);
4427 } else {
4428 copy_count = num_elements_to_end_of_queue *
4429 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4430 memcpy(next_element, request, copy_count);
4431 memcpy(queue_group->iq_element_array[path],
4432 (u8 *)request + copy_count,
4433 iu_length - copy_count);
4436 iq_pi = (iq_pi + num_elements_needed) %
4437 ctrl_info->num_elements_per_iq;
4439 list_del(&io_request->request_list_entry);
4442 if (iq_pi != queue_group->iq_pi_copy[path]) {
4443 queue_group->iq_pi_copy[path] = iq_pi;
4445 * This write notifies the controller that one or more IUs are
4446 * available to be processed.
4448 writel(iq_pi, queue_group->iq_pi[path]);
4451 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
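/*
 * Editor's note: pqi_num_elements_free(), used above, is not shown in this
 * excerpt. A sketch consistent with its use here, assuming the usual ring
 * convention in which one element is always left unused so that PI == CI
 * unambiguously means "empty" rather than "full":
 */
static inline unsigned int pqi_example_num_elements_free(unsigned int pi,
unsigned int ci, unsigned int elements_in_queue)
{
unsigned int num_elements_used;

if (pi >= ci)
num_elements_used = pi - ci;
else
num_elements_used = elements_in_queue - ci + pi;

/* Reserve one slot to keep a full ring distinguishable from an empty one. */
return elements_in_queue - num_elements_used - 1;
}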
4454 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4456 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4457 struct completion *wait)
4461 while (1) {
4462 if (wait_for_completion_io_timeout(wait,
4463 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4464 rc = 0;
4465 break;
4466 }
4468 pqi_check_ctrl_health(ctrl_info);
4469 if (pqi_ctrl_offline(ctrl_info)) {
4470 rc = -ENXIO;
4471 break;
4472 }
4478 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4479 void *context)
4481 struct completion *waiting = context;
4483 complete(waiting);
4486 static int pqi_process_raid_io_error_synchronous(
4487 struct pqi_raid_error_info *error_info)
4491 switch (error_info->data_out_result) {
4492 case PQI_DATA_IN_OUT_GOOD:
4493 if (error_info->status == SAM_STAT_GOOD)
4494 rc = 0;
4495 break;
4496 case PQI_DATA_IN_OUT_UNDERFLOW:
4497 if (error_info->status == SAM_STAT_GOOD ||
4498 error_info->status == SAM_STAT_CHECK_CONDITION)
4499 rc = 0;
4500 break;
4501 case PQI_DATA_IN_OUT_ABORTED:
4502 rc = PQI_CMD_STATUS_ABORTED;
4503 break;
4509 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4511 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4514 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4515 struct pqi_iu_header *request, unsigned int flags,
4516 struct pqi_raid_error_info *error_info)
4519 struct pqi_io_request *io_request;
4521 DECLARE_COMPLETION_ONSTACK(wait);
4523 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4524 if (down_interruptible(&ctrl_info->sync_request_sem))
4525 return -ERESTARTSYS;
4526 } else {
4527 down(&ctrl_info->sync_request_sem);
4530 pqi_ctrl_busy(ctrl_info);
4532 * Wait for other admin queue updates such as:
4533 * config table changes, OFA memory updates, ...
4535 if (pqi_is_blockable_request(request))
4536 pqi_wait_if_ctrl_blocked(ctrl_info);
4538 if (pqi_ctrl_offline(ctrl_info)) {
4539 rc = -ENXIO;
4540 goto out;
4541 }
4543 io_request = pqi_alloc_io_request(ctrl_info);
4545 put_unaligned_le16(io_request->index,
4546 &(((struct pqi_raid_path_request *)request)->request_id));
4548 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4549 ((struct pqi_raid_path_request *)request)->error_index =
4550 ((struct pqi_raid_path_request *)request)->request_id;
4552 iu_length = get_unaligned_le16(&request->iu_length) +
4553 PQI_REQUEST_HEADER_LENGTH;
4554 memcpy(io_request->iu, request, iu_length);
4556 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4557 io_request->context = &wait;
4559 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4562 rc = pqi_wait_for_completion_io(ctrl_info, &wait);
4564 if (error_info) {
4565 if (io_request->error_info)
4566 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4567 else
4568 memset(error_info, 0, sizeof(*error_info));
4569 } else if (rc == 0 && io_request->error_info) {
4570 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4573 pqi_free_io_request(io_request);
4575 out:
4576 pqi_ctrl_unbusy(ctrl_info);
4577 up(&ctrl_info->sync_request_sem);
4582 static int pqi_validate_admin_response(
4583 struct pqi_general_admin_response *response, u8 expected_function_code)
4585 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4586 return -EINVAL;
4588 if (get_unaligned_le16(&response->header.iu_length) !=
4589 PQI_GENERAL_ADMIN_IU_LENGTH)
4590 return -EINVAL;
4592 if (response->function_code != expected_function_code)
4593 return -EINVAL;
4595 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4596 return -EIO;
4598 return 0;
4601 static int pqi_submit_admin_request_synchronous(
4602 struct pqi_ctrl_info *ctrl_info,
4603 struct pqi_general_admin_request *request,
4604 struct pqi_general_admin_response *response)
4608 pqi_submit_admin_request(ctrl_info, request);
4610 rc = pqi_poll_for_admin_response(ctrl_info, response);
4612 if (rc == 0)
4613 rc = pqi_validate_admin_response(response, request->function_code);
4618 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4621 struct pqi_general_admin_request request;
4622 struct pqi_general_admin_response response;
4623 struct pqi_device_capability *capability;
4624 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4626 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4630 memset(&request, 0, sizeof(request));
4632 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4633 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4634 &request.header.iu_length);
4635 request.function_code =
4636 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4637 put_unaligned_le32(sizeof(*capability),
4638 &request.data.report_device_capability.buffer_length);
4640 rc = pqi_map_single(ctrl_info->pci_dev,
4641 &request.data.report_device_capability.sg_descriptor,
4642 capability, sizeof(*capability),
4643 DMA_FROM_DEVICE);
4645 if (rc)
4646 goto out;
4647 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4649 pqi_pci_unmap(ctrl_info->pci_dev,
4650 &request.data.report_device_capability.sg_descriptor, 1,
4651 DMA_FROM_DEVICE);
4653 if (rc)
4654 goto out;
4656 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4657 rc = -EIO;
4658 goto out;
4659 }
4661 ctrl_info->max_inbound_queues =
4662 get_unaligned_le16(&capability->max_inbound_queues);
4663 ctrl_info->max_elements_per_iq =
4664 get_unaligned_le16(&capability->max_elements_per_iq);
4665 ctrl_info->max_iq_element_length =
4666 get_unaligned_le16(&capability->max_iq_element_length)
4667 * 16;
4668 ctrl_info->max_outbound_queues =
4669 get_unaligned_le16(&capability->max_outbound_queues);
4670 ctrl_info->max_elements_per_oq =
4671 get_unaligned_le16(&capability->max_elements_per_oq);
4672 ctrl_info->max_oq_element_length =
4673 get_unaligned_le16(&capability->max_oq_element_length)
4674 * 16;
4676 sop_iu_layer_descriptor =
4677 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4679 ctrl_info->max_inbound_iu_length_per_firmware =
4680 get_unaligned_le16(
4681 &sop_iu_layer_descriptor->max_inbound_iu_length);
4682 ctrl_info->inbound_spanning_supported =
4683 sop_iu_layer_descriptor->inbound_spanning_supported;
4684 ctrl_info->outbound_spanning_supported =
4685 sop_iu_layer_descriptor->outbound_spanning_supported;
4687 out:
4688 kfree(capability);
4690 return rc;
4693 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4695 if (ctrl_info->max_iq_element_length <
4696 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4697 dev_err(&ctrl_info->pci_dev->dev,
4698 "max. inbound queue element length of %d is less than the required length of %d\n",
4699 ctrl_info->max_iq_element_length,
4700 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4701 return -EINVAL;
4702 }
4704 if (ctrl_info->max_oq_element_length <
4705 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4706 dev_err(&ctrl_info->pci_dev->dev,
4707 "max. outbound queue element length of %d is less than the required length of %d\n",
4708 ctrl_info->max_oq_element_length,
4709 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4710 return -EINVAL;
4711 }
4713 if (ctrl_info->max_inbound_iu_length_per_firmware <
4714 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4715 dev_err(&ctrl_info->pci_dev->dev,
4716 "max. inbound IU length of %u is less than the min. required length of %d\n",
4717 ctrl_info->max_inbound_iu_length_per_firmware,
4718 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4719 return -EINVAL;
4720 }
4722 if (!ctrl_info->inbound_spanning_supported) {
4723 dev_err(&ctrl_info->pci_dev->dev,
4724 "the controller does not support inbound spanning\n");
4728 if (ctrl_info->outbound_spanning_supported) {
4729 dev_err(&ctrl_info->pci_dev->dev,
4730 "the controller supports outbound spanning but this driver does not\n");
4737 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4740 struct pqi_event_queue *event_queue;
4741 struct pqi_general_admin_request request;
4742 struct pqi_general_admin_response response;
4744 event_queue = &ctrl_info->event_queue;
4747 * Create OQ (Outbound Queue - device to host queue) to dedicate
4748 * to events.
4750 memset(&request, 0, sizeof(request));
4751 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4752 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4753 &request.header.iu_length);
4754 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4755 put_unaligned_le16(event_queue->oq_id,
4756 &request.data.create_operational_oq.queue_id);
4757 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4758 &request.data.create_operational_oq.element_array_addr);
4759 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4760 &request.data.create_operational_oq.pi_addr);
4761 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4762 &request.data.create_operational_oq.num_elements);
4763 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4764 &request.data.create_operational_oq.element_length);
4765 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4766 put_unaligned_le16(event_queue->int_msg_num,
4767 &request.data.create_operational_oq.int_msg_num);
4769 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4770 &response);
4771 if (rc)
4772 return rc;
4774 event_queue->oq_ci = ctrl_info->iomem_base +
4775 PQI_DEVICE_REGISTERS_OFFSET +
4776 get_unaligned_le64(
4777 &response.data.create_operational_oq.oq_ci_offset);
4782 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4783 unsigned int group_number)
4786 struct pqi_queue_group *queue_group;
4787 struct pqi_general_admin_request request;
4788 struct pqi_general_admin_response response;
4790 queue_group = &ctrl_info->queue_groups[group_number];
4793 * Create IQ (Inbound Queue - host to device queue) for
4794 * RAID path.
4796 memset(&request, 0, sizeof(request));
4797 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4798 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4799 &request.header.iu_length);
4800 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4801 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4802 &request.data.create_operational_iq.queue_id);
4803 put_unaligned_le64(
4804 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4805 &request.data.create_operational_iq.element_array_addr);
4806 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4807 &request.data.create_operational_iq.ci_addr);
4808 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4809 &request.data.create_operational_iq.num_elements);
4810 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4811 &request.data.create_operational_iq.element_length);
4812 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4814 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4815 &response);
4816 if (rc) {
4817 dev_err(&ctrl_info->pci_dev->dev,
4818 "error creating inbound RAID queue\n");
4822 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4823 PQI_DEVICE_REGISTERS_OFFSET +
4824 get_unaligned_le64(
4825 &response.data.create_operational_iq.iq_pi_offset);
4828 * Create IQ (Inbound Queue - host to device queue) for
4829 * Advanced I/O (AIO) path.
4831 memset(&request, 0, sizeof(request));
4832 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4833 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4834 &request.header.iu_length);
4835 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4836 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4837 &request.data.create_operational_iq.queue_id);
4838 put_unaligned_le64((u64)queue_group->
4839 iq_element_array_bus_addr[AIO_PATH],
4840 &request.data.create_operational_iq.element_array_addr);
4841 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4842 &request.data.create_operational_iq.ci_addr);
4843 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4844 &request.data.create_operational_iq.num_elements);
4845 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4846 &request.data.create_operational_iq.element_length);
4847 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4849 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4850 &response);
4851 if (rc) {
4852 dev_err(&ctrl_info->pci_dev->dev,
4853 "error creating inbound AIO queue\n");
4857 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4858 PQI_DEVICE_REGISTERS_OFFSET +
4859 get_unaligned_le64(
4860 &response.data.create_operational_iq.iq_pi_offset);
4863 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4864 * assumed to be for RAID path I/O unless we change the queue's
4865 * property.
4867 memset(&request, 0, sizeof(request));
4868 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4869 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4870 &request.header.iu_length);
4871 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4872 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4873 &request.data.change_operational_iq_properties.queue_id);
4874 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4875 &request.data.change_operational_iq_properties.vendor_specific);
4877 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4878 &response);
4879 if (rc) {
4880 dev_err(&ctrl_info->pci_dev->dev,
4881 "error changing queue property\n");
4886 * Create OQ (Outbound Queue - device to host queue).
4888 memset(&request, 0, sizeof(request));
4889 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4890 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4891 &request.header.iu_length);
4892 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4893 put_unaligned_le16(queue_group->oq_id,
4894 &request.data.create_operational_oq.queue_id);
4895 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4896 &request.data.create_operational_oq.element_array_addr);
4897 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4898 &request.data.create_operational_oq.pi_addr);
4899 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4900 &request.data.create_operational_oq.num_elements);
4901 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4902 &request.data.create_operational_oq.element_length);
4903 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4904 put_unaligned_le16(queue_group->int_msg_num,
4905 &request.data.create_operational_oq.int_msg_num);
4907 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4908 &response);
4909 if (rc) {
4910 dev_err(&ctrl_info->pci_dev->dev,
4911 "error creating outbound queue\n");
4915 queue_group->oq_ci = ctrl_info->iomem_base +
4916 PQI_DEVICE_REGISTERS_OFFSET +
4917 get_unaligned_le64(
4918 &response.data.create_operational_oq.oq_ci_offset);
4923 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4928 rc = pqi_create_event_queue(ctrl_info);
4929 if (rc) {
4930 dev_err(&ctrl_info->pci_dev->dev,
4931 "error creating event queue\n");
4935 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4936 rc = pqi_create_queue_group(ctrl_info, i);
4937 if (rc) {
4938 dev_err(&ctrl_info->pci_dev->dev,
4939 "error creating queue group number %u/%u\n",
4940 i, ctrl_info->num_queue_groups);
4941 return rc;
4942 }
4948 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4949 struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
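/*
 * Editor's note: struct_size() sizes a structure that ends in a flexible
 * array member, saturating on overflow. Ignoring that overflow handling,
 * the macro above is equivalent to:
 *
 *   sizeof(struct pqi_event_config) +
 *       (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))
 */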
4951 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4956 struct pqi_event_config *event_config;
4957 struct pqi_event_descriptor *event_descriptor;
4958 struct pqi_general_management_request request;
4960 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4961 GFP_KERNEL);
4962 if (!event_config)
4963 return -ENOMEM;
4965 memset(&request, 0, sizeof(request));
4967 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4968 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4969 data.report_event_configuration.sg_descriptors[1]) -
4970 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4971 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4972 &request.data.report_event_configuration.buffer_length);
4974 rc = pqi_map_single(ctrl_info->pci_dev,
4975 request.data.report_event_configuration.sg_descriptors,
4976 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4977 DMA_FROM_DEVICE);
4978 if (rc)
4979 goto out;
4981 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
4983 pqi_pci_unmap(ctrl_info->pci_dev,
4984 request.data.report_event_configuration.sg_descriptors, 1,
4985 DMA_FROM_DEVICE);
4987 if (rc)
4988 goto out;
4990 for (i = 0; i < event_config->num_event_descriptors; i++) {
4991 event_descriptor = &event_config->descriptors[i];
4992 if (enable_events &&
4993 pqi_is_supported_event(event_descriptor->event_type))
4994 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4995 &event_descriptor->oq_id);
4996 else
4997 put_unaligned_le16(0, &event_descriptor->oq_id);
5000 memset(&request, 0, sizeof(request));
5002 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5003 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5004 data.report_event_configuration.sg_descriptors[1]) -
5005 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5006 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5007 &request.data.report_event_configuration.buffer_length);
5009 rc = pqi_map_single(ctrl_info->pci_dev,
5010 request.data.report_event_configuration.sg_descriptors,
5011 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5012 DMA_TO_DEVICE);
5013 if (rc)
5014 goto out;
5016 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5018 pqi_pci_unmap(ctrl_info->pci_dev,
5019 request.data.report_event_configuration.sg_descriptors, 1,
5020 DMA_TO_DEVICE);
5022 out:
5023 kfree(event_config);
5025 return rc;
5028 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5030 return pqi_configure_events(ctrl_info, true);
5033 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5037 size_t sg_chain_buffer_length;
5038 struct pqi_io_request *io_request;
5040 if (!ctrl_info->io_request_pool)
5041 return;
5043 dev = &ctrl_info->pci_dev->dev;
5044 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5045 io_request = ctrl_info->io_request_pool;
5047 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5048 kfree(io_request->iu);
5049 if (!io_request->sg_chain_buffer)
5051 dma_free_coherent(dev, sg_chain_buffer_length,
5052 io_request->sg_chain_buffer,
5053 io_request->sg_chain_buffer_dma_handle);
5057 kfree(ctrl_info->io_request_pool);
5058 ctrl_info->io_request_pool = NULL;
5061 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5063 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5064 ctrl_info->error_buffer_length,
5065 &ctrl_info->error_buffer_dma_handle,
5066 GFP_KERNEL);
5067 if (!ctrl_info->error_buffer)
5068 return -ENOMEM;
5070 return 0;
5073 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5076 void *sg_chain_buffer;
5077 size_t sg_chain_buffer_length;
5078 dma_addr_t sg_chain_buffer_dma_handle;
5080 struct pqi_io_request *io_request;
5082 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5083 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5085 if (!ctrl_info->io_request_pool) {
5086 dev_err(&ctrl_info->pci_dev->dev,
5087 "failed to allocate I/O request pool\n");
5091 dev = &ctrl_info->pci_dev->dev;
5092 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5093 io_request = ctrl_info->io_request_pool;
5095 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5096 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5098 if (!io_request->iu) {
5099 dev_err(&ctrl_info->pci_dev->dev,
5100 "failed to allocate IU buffers\n");
5104 sg_chain_buffer = dma_alloc_coherent(dev,
5105 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5106 GFP_KERNEL);
5108 if (!sg_chain_buffer) {
5109 dev_err(&ctrl_info->pci_dev->dev,
5110 "failed to allocate PQI scatter-gather chain buffers\n");
5114 io_request->index = i;
5115 io_request->sg_chain_buffer = sg_chain_buffer;
5116 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5117 io_request++;
5118 }
5120 return 0;
5122 error:
5123 pqi_free_all_io_requests(ctrl_info);
5125 return -ENOMEM;
5129 * Calculate required resources that are sized based on max. outstanding
5130 * requests and max. transfer size.
5133 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5135 u32 max_transfer_size;
5138 ctrl_info->scsi_ml_can_queue =
5139 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5140 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5142 ctrl_info->error_buffer_length =
5143 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5145 if (reset_devices)
5146 max_transfer_size = min(ctrl_info->max_transfer_size,
5147 PQI_MAX_TRANSFER_SIZE_KDUMP);
5148 else
5149 max_transfer_size = min(ctrl_info->max_transfer_size,
5150 PQI_MAX_TRANSFER_SIZE);
5152 max_sg_entries = max_transfer_size / PAGE_SIZE;
5154 /* +1 to cover when the buffer is not page-aligned. */
5155 max_sg_entries++;
5157 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5159 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5161 ctrl_info->sg_chain_buffer_length =
5162 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5163 PQI_EXTRA_SGL_MEMORY;
5164 ctrl_info->sg_tablesize = max_sg_entries;
5165 ctrl_info->max_sectors = max_transfer_size / 512;
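/*
 * Editor's note -- a worked example of the sizing above under assumed
 * values (PAGE_SIZE = 4 KiB, max_transfer_size = 1 MiB, not a kdump boot):
 *
 *   max_sg_entries    = 1 MiB / 4 KiB = 256, then +1 for a buffer that is
 *                       not page-aligned = 257 (clamped to the controller's
 *                       max_sg_entries)
 *   max_transfer_size = (257 - 1) * 4 KiB = 1 MiB (re-derived so the extra
 *                       entry is given back)
 *   max_sectors       = 1 MiB / 512 = 2048
 */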
5168 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5170 int num_queue_groups;
5171 u16 num_elements_per_iq;
5172 u16 num_elements_per_oq;
5174 if (reset_devices) {
5175 num_queue_groups = 1;
5176 } else {
5177 int num_cpus;
5178 int max_queue_groups;
5180 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5181 ctrl_info->max_outbound_queues - 1);
5182 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5184 num_cpus = num_online_cpus();
5185 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5186 num_queue_groups = min(num_queue_groups, max_queue_groups);
5187 }
5189 ctrl_info->num_queue_groups = num_queue_groups;
5190 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
5193 * Make sure that the max. inbound IU length is an even multiple
5194 * of our inbound element length.
5196 ctrl_info->max_inbound_iu_length =
5197 (ctrl_info->max_inbound_iu_length_per_firmware /
5198 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5199 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5201 num_elements_per_iq =
5202 (ctrl_info->max_inbound_iu_length /
5203 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5205 /* Add one because one element in each queue is unusable. */
5206 num_elements_per_iq++;
5208 num_elements_per_iq = min(num_elements_per_iq,
5209 ctrl_info->max_elements_per_iq);
5211 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5212 num_elements_per_oq = min(num_elements_per_oq,
5213 ctrl_info->max_elements_per_oq);
5215 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5216 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5218 ctrl_info->max_sg_per_iu =
5219 ((ctrl_info->max_inbound_iu_length -
5220 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5221 sizeof(struct pqi_sg_descriptor)) +
5222 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5224 ctrl_info->max_sg_per_r56_iu =
5225 ((ctrl_info->max_inbound_iu_length -
5226 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5227 sizeof(struct pqi_sg_descriptor)) +
5228 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
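/*
 * Editor's note -- sketch of the rounding above: dividing and re-multiplying
 * by the element length rounds the firmware's limit down to a whole number
 * of inbound queue elements. For example, assuming 128-byte elements and a
 * 1068-byte firmware limit: (1068 / 128) * 128 = 8 * 128 = 1024.
 */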
5231 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5232 struct scatterlist *sg)
5234 u64 address = (u64)sg_dma_address(sg);
5235 unsigned int length = sg_dma_len(sg);
5237 put_unaligned_le64(address, &sg_descriptor->address);
5238 put_unaligned_le32(length, &sg_descriptor->length);
5239 put_unaligned_le32(0, &sg_descriptor->flags);
5242 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5243 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5244 int max_sg_per_iu, bool *chained)
5247 unsigned int num_sg_in_iu;
5252 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
5254 while (1) {
5255 pqi_set_sg_descriptor(sg_descriptor, sg);
5256 if (!*chained)
5257 num_sg_in_iu++;
5258 i++;
5259 if (i == sg_count)
5260 break;
5261 sg_descriptor++;
5262 if (i == max_sg_per_iu) {
5263 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5264 &sg_descriptor->address);
5265 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5266 &sg_descriptor->length);
5267 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5268 *chained = true;
5269 num_sg_in_iu++;
5270 sg_descriptor = io_request->sg_chain_buffer;
5271 }
5273 sg = sg_next(sg);
5274 }
5275 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5277 return num_sg_in_iu;
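/*
 * Editor's note on the chaining above: max_sg_per_iu is decremented on entry
 * to reserve one embedded descriptor slot. When the embedded slots run out,
 * that reserved slot is written as a CISS_SG_CHAIN descriptor whose address
 * and length point at io_request->sg_chain_buffer, and filling continues in
 * the chain buffer; CISS_SG_LAST marks the final descriptor of the list.
 */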
5280 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5281 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5282 struct pqi_io_request *io_request)
5287 unsigned int num_sg_in_iu;
5288 struct scatterlist *sg;
5289 struct pqi_sg_descriptor *sg_descriptor;
5291 sg_count = scsi_dma_map(scmd);
5295 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5296 PQI_REQUEST_HEADER_LENGTH;
5301 sg = scsi_sglist(scmd);
5302 sg_descriptor = request->sg_descriptors;
5304 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5305 ctrl_info->max_sg_per_iu, &chained);
5307 request->partial = chained;
5308 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5311 put_unaligned_le16(iu_length, &request->header.iu_length);
5316 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5317 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5318 struct pqi_io_request *io_request)
5323 unsigned int num_sg_in_iu;
5324 struct scatterlist *sg;
5325 struct pqi_sg_descriptor *sg_descriptor;
5327 sg_count = scsi_dma_map(scmd);
5331 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5332 PQI_REQUEST_HEADER_LENGTH;
5338 sg = scsi_sglist(scmd);
5339 sg_descriptor = request->sg_descriptors;
5341 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5342 ctrl_info->max_sg_per_iu, &chained);
5344 request->partial = chained;
5345 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5348 put_unaligned_le16(iu_length, &request->header.iu_length);
5349 request->num_sg_descriptors = num_sg_in_iu;
5354 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5355 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5356 struct pqi_io_request *io_request)
5361 unsigned int num_sg_in_iu;
5362 struct scatterlist *sg;
5363 struct pqi_sg_descriptor *sg_descriptor;
5365 sg_count = scsi_dma_map(scmd);
5369 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5370 PQI_REQUEST_HEADER_LENGTH;
5373 if (sg_count != 0) {
5374 sg = scsi_sglist(scmd);
5375 sg_descriptor = request->sg_descriptors;
5377 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5378 ctrl_info->max_sg_per_r56_iu, &chained);
5380 request->partial = chained;
5381 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5382 }
5384 put_unaligned_le16(iu_length, &request->header.iu_length);
5385 request->num_sg_descriptors = num_sg_in_iu;
5390 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5391 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5392 struct pqi_io_request *io_request)
5397 unsigned int num_sg_in_iu;
5398 struct scatterlist *sg;
5399 struct pqi_sg_descriptor *sg_descriptor;
5401 sg_count = scsi_dma_map(scmd);
5405 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5406 PQI_REQUEST_HEADER_LENGTH;
5412 sg = scsi_sglist(scmd);
5413 sg_descriptor = request->sg_descriptors;
5415 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5416 ctrl_info->max_sg_per_iu, &chained);
5418 request->partial = chained;
5419 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5422 put_unaligned_le16(iu_length, &request->header.iu_length);
5423 request->num_sg_descriptors = num_sg_in_iu;
5428 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5431 struct scsi_cmnd *scmd;
5433 scmd = io_request->scmd;
5434 pqi_free_io_request(io_request);
5435 scsi_dma_unmap(scmd);
5436 pqi_scsi_done(scmd);
5439 static int pqi_raid_submit_scsi_cmd_with_io_request(
5440 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
5441 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5442 struct pqi_queue_group *queue_group)
5446 struct pqi_raid_path_request *request;
5448 io_request->io_complete_callback = pqi_raid_io_complete;
5449 io_request->scmd = scmd;
5451 request = io_request->iu;
5452 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5454 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5455 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5456 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5457 put_unaligned_le16(io_request->index, &request->request_id);
5458 request->error_index = request->request_id;
5459 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5461 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5462 memcpy(request->cdb, scmd->cmnd, cdb_length);
5464 switch (cdb_length) {
5469 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5472 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5475 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5478 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5482 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5486 switch (scmd->sc_data_direction) {
5487 case DMA_TO_DEVICE:
5488 request->data_direction = SOP_READ_FLAG;
5489 break;
5490 case DMA_FROM_DEVICE:
5491 request->data_direction = SOP_WRITE_FLAG;
5492 break;
5493 case DMA_NONE:
5494 request->data_direction = SOP_NO_DIRECTION_FLAG;
5495 break;
5496 case DMA_BIDIRECTIONAL:
5497 request->data_direction = SOP_BIDIRECTIONAL;
5498 break;
5499 default:
5500 dev_err(&ctrl_info->pci_dev->dev,
5501 "unknown data direction: %d\n",
5502 scmd->sc_data_direction);
5503 break;
5504 }
5506 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5507 if (rc) {
5508 pqi_free_io_request(io_request);
5509 return SCSI_MLQUEUE_HOST_BUSY;
5510 }
5512 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5517 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5518 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5519 struct pqi_queue_group *queue_group)
5521 struct pqi_io_request *io_request;
5523 io_request = pqi_alloc_io_request(ctrl_info);
5525 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5526 device, scmd, queue_group);
5529 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5531 struct scsi_cmnd *scmd;
5532 struct pqi_scsi_dev *device;
5533 struct pqi_ctrl_info *ctrl_info;
5535 if (!io_request->raid_bypass)
5536 return false;
5538 scmd = io_request->scmd;
5539 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5540 return false;
5541 if (host_byte(scmd->result) == DID_NO_CONNECT)
5542 return false;
5544 device = scmd->device->hostdata;
5545 if (pqi_device_offline(device) || pqi_device_in_remove(device))
5546 return false;
5548 ctrl_info = shost_to_hba(scmd->device->host);
5549 if (pqi_ctrl_offline(ctrl_info))
5550 return false;
5552 return true;
5555 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5558 struct scsi_cmnd *scmd;
5560 scmd = io_request->scmd;
5561 scsi_dma_unmap(scmd);
5562 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5563 set_host_byte(scmd, DID_IMM_RETRY);
5564 pqi_cmd_priv(scmd)->this_residual++;
5565 }
5567 pqi_free_io_request(io_request);
5568 pqi_scsi_done(scmd);
5571 static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
5572 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
5577 io_high_prio = false;
5579 if (device->ncq_prio_enable) {
5580 priority_class =
5581 IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
5582 if (priority_class == IOPRIO_CLASS_RT) {
5583 /* Set NCQ priority for read/write commands. */
5584 switch (scmd->cmnd[0]) {
5593 io_high_prio = true;
5599 return io_high_prio;
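/*
 * Editor's note (hedged): the priority class checked above comes from the
 * request's I/O priority, so user space can opt in, e.g. with "ionice -c 1"
 * or the ioprio_set() syscall using IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0),
 * provided ncq_prio_enable has been turned on for the device.
 */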
5602 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5603 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5604 struct pqi_queue_group *queue_group)
5608 io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
5610 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5611 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5612 false, io_high_prio);
5615 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5616 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5617 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5618 struct pqi_encryption_info *encryption_info, bool raid_bypass,
5619 bool io_high_prio)
5622 struct pqi_io_request *io_request;
5623 struct pqi_aio_path_request *request;
5625 io_request = pqi_alloc_io_request(ctrl_info);
5626 io_request->io_complete_callback = pqi_aio_io_complete;
5627 io_request->scmd = scmd;
5628 io_request->raid_bypass = raid_bypass;
5630 request = io_request->iu;
5631 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5633 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5634 put_unaligned_le32(aio_handle, &request->nexus_id);
5635 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5636 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5637 request->command_priority = io_high_prio;
5638 put_unaligned_le16(io_request->index, &request->request_id);
5639 request->error_index = request->request_id;
5640 if (cdb_length > sizeof(request->cdb))
5641 cdb_length = sizeof(request->cdb);
5642 request->cdb_length = cdb_length;
5643 memcpy(request->cdb, cdb, cdb_length);
5645 switch (scmd->sc_data_direction) {
5646 case DMA_TO_DEVICE:
5647 request->data_direction = SOP_READ_FLAG;
5648 break;
5649 case DMA_FROM_DEVICE:
5650 request->data_direction = SOP_WRITE_FLAG;
5651 break;
5652 case DMA_NONE:
5653 request->data_direction = SOP_NO_DIRECTION_FLAG;
5654 break;
5655 case DMA_BIDIRECTIONAL:
5656 request->data_direction = SOP_BIDIRECTIONAL;
5657 break;
5658 default:
5659 dev_err(&ctrl_info->pci_dev->dev,
5660 "unknown data direction: %d\n",
5661 scmd->sc_data_direction);
5662 break;
5663 }
5665 if (encryption_info) {
5666 request->encryption_enable = true;
5667 put_unaligned_le16(encryption_info->data_encryption_key_index,
5668 &request->data_encryption_key_index);
5669 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5670 &request->encrypt_tweak_lower);
5671 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5672 &request->encrypt_tweak_upper);
5675 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5676 if (rc) {
5677 pqi_free_io_request(io_request);
5678 return SCSI_MLQUEUE_HOST_BUSY;
5679 }
5681 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5686 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5687 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5688 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5689 struct pqi_scsi_dev_raid_map_data *rmd)
5692 struct pqi_io_request *io_request;
5693 struct pqi_aio_r1_path_request *r1_request;
5695 io_request = pqi_alloc_io_request(ctrl_info);
5696 io_request->io_complete_callback = pqi_aio_io_complete;
5697 io_request->scmd = scmd;
5698 io_request->raid_bypass = true;
5700 r1_request = io_request->iu;
5701 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5703 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5704 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5705 r1_request->num_drives = rmd->num_it_nexus_entries;
5706 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5707 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5708 if (rmd->num_it_nexus_entries == 3)
5709 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5711 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5712 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5713 put_unaligned_le16(io_request->index, &r1_request->request_id);
5714 r1_request->error_index = r1_request->request_id;
5715 if (rmd->cdb_length > sizeof(r1_request->cdb))
5716 rmd->cdb_length = sizeof(r1_request->cdb);
5717 r1_request->cdb_length = rmd->cdb_length;
5718 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5720 /* The direction is always write. */
5721 r1_request->data_direction = SOP_READ_FLAG;
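/*
 * Editor's note: SOP direction flags are from the controller's point of
 * view -- for a host write the controller *reads* the data buffer, hence
 * SOP_READ_FLAG here and in the RAID 5/6 write path below.
 */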
5723 if (encryption_info) {
5724 r1_request->encryption_enable = true;
5725 put_unaligned_le16(encryption_info->data_encryption_key_index,
5726 &r1_request->data_encryption_key_index);
5727 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5728 &r1_request->encrypt_tweak_lower);
5729 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5730 &r1_request->encrypt_tweak_upper);
5733 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5734 if (rc) {
5735 pqi_free_io_request(io_request);
5736 return SCSI_MLQUEUE_HOST_BUSY;
5737 }
5739 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5744 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5745 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5746 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5747 struct pqi_scsi_dev_raid_map_data *rmd)
5750 struct pqi_io_request *io_request;
5751 struct pqi_aio_r56_path_request *r56_request;
5753 io_request = pqi_alloc_io_request(ctrl_info);
5754 io_request->io_complete_callback = pqi_aio_io_complete;
5755 io_request->scmd = scmd;
5756 io_request->raid_bypass = true;
5758 r56_request = io_request->iu;
5759 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5761 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5762 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5763 else
5764 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5766 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5767 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5768 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5769 if (rmd->raid_level == SA_RAID_6) {
5770 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5771 r56_request->xor_multiplier = rmd->xor_mult;
5772 }
5773 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5774 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5775 put_unaligned_le64(rmd->row, &r56_request->row);
5777 put_unaligned_le16(io_request->index, &r56_request->request_id);
5778 r56_request->error_index = r56_request->request_id;
5780 if (rmd->cdb_length > sizeof(r56_request->cdb))
5781 rmd->cdb_length = sizeof(r56_request->cdb);
5782 r56_request->cdb_length = rmd->cdb_length;
5783 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5785 /* The direction is always write. */
5786 r56_request->data_direction = SOP_READ_FLAG;
5788 if (encryption_info) {
5789 r56_request->encryption_enable = true;
5790 put_unaligned_le16(encryption_info->data_encryption_key_index,
5791 &r56_request->data_encryption_key_index);
5792 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5793 &r56_request->encrypt_tweak_lower);
5794 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5795 &r56_request->encrypt_tweak_upper);
5798 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5799 if (rc) {
5800 pqi_free_io_request(io_request);
5801 return SCSI_MLQUEUE_HOST_BUSY;
5802 }
5804 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5809 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5810 struct scsi_cmnd *scmd)
5814 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5815 if (hw_queue > ctrl_info->max_hw_queue_index)
5816 hw_queue = 0;
5818 return hw_queue;
5821 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5823 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5824 return false;
5826 return pqi_cmd_priv(scmd)->this_residual == 0;
5830 * This function gets called just before we hand the completed SCSI request
5831 * back to the SML.
5834 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5836 struct pqi_scsi_dev *device;
5838 if (!scmd->device) {
5839 set_host_byte(scmd, DID_NO_CONNECT);
5840 return;
5841 }
5843 device = scmd->device->hostdata;
5844 if (!device) {
5845 set_host_byte(scmd, DID_NO_CONNECT);
5846 return;
5847 }
5849 atomic_dec(&device->scsi_cmds_outstanding);
5852 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5853 struct scsi_cmnd *scmd)
5859 struct pqi_scsi_dev *device;
5860 struct pqi_stream_data *pqi_stream_data;
5861 struct pqi_scsi_dev_raid_map_data rmd;
5863 if (!ctrl_info->enable_stream_detection)
5864 return false;
5866 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5870 /* Check writes only. */
5874 device = scmd->device->hostdata;
5876 /* Check for RAID 5/6 streams. */
5877 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5878 return false;
5881 * If controller does not support AIO RAID{5,6} writes, need to send
5882 * requests down non-AIO path.
5884 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5885 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5886 return true;
5889 oldest_jiffies = INT_MAX;
5890 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5891 pqi_stream_data = &device->stream_data[i];
5893 * Check for adjacent request or request is within
5894 * the previous request.
5896 if ((pqi_stream_data->next_lba &&
5897 rmd.first_block >= pqi_stream_data->next_lba) &&
5898 rmd.first_block <= pqi_stream_data->next_lba +
5899 rmd.block_cnt) {
5900 pqi_stream_data->next_lba = rmd.first_block +
5901 rmd.block_cnt;
5902 pqi_stream_data->last_accessed = jiffies;
5903 return true;
5904 }
5907 if (pqi_stream_data->last_accessed == 0) {
5908 lru_index = i;
5909 break;
5910 }
5912 /* Find entry with oldest last accessed time. */
5913 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5914 oldest_jiffies = pqi_stream_data->last_accessed;
5915 lru_index = i;
5916 }
5917 }
5919 /* Set LRU entry. */
5920 pqi_stream_data = &device->stream_data[lru_index];
5921 pqi_stream_data->last_accessed = jiffies;
5922 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5924 return false;
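/*
 * Editor's note -- a worked example of the detector above: a host issuing
 * two interleaved sequential write streams (say LBA 0, 8, 16, ... and
 * LBA 1000, 1008, ...) keeps matching the "adjacent or within the previous
 * request" test, so each write refreshes its tracking slot and the function
 * returns true, steering the stream down the RAID path for full-stripe
 * writes. Random writes miss every slot, recycle the least-recently-used
 * entry, and return false, leaving them eligible for AIO bypass.
 */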
5927 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5930 struct pqi_ctrl_info *ctrl_info;
5931 struct pqi_scsi_dev *device;
5933 struct pqi_queue_group *queue_group;
5936 device = scmd->device->hostdata;
5938 if (!device) {
5939 set_host_byte(scmd, DID_NO_CONNECT);
5940 pqi_scsi_done(scmd);
5941 return 0;
5942 }
5944 atomic_inc(&device->scsi_cmds_outstanding);
5946 ctrl_info = shost_to_hba(shost);
5948 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5949 set_host_byte(scmd, DID_NO_CONNECT);
5950 pqi_scsi_done(scmd);
5951 return 0;
5952 }
5954 if (pqi_ctrl_blocked(ctrl_info)) {
5955 rc = SCSI_MLQUEUE_HOST_BUSY;
5956 goto out;
5957 }
5960 * This is necessary because the SML doesn't zero out this field during
5961 * error recovery.
5963 scmd->result = 0;
5965 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5966 queue_group = &ctrl_info->queue_groups[hw_queue];
5968 if (pqi_is_logical_device(device)) {
5969 raid_bypassed = false;
5970 if (device->raid_bypass_enabled &&
5971 pqi_is_bypass_eligible_request(scmd) &&
5972 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5973 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5974 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5975 raid_bypassed = true;
5976 atomic_inc(&device->raid_bypass_cnt);
5977 }
5978 }
5979 if (!raid_bypassed)
5980 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5981 } else {
5982 if (device->aio_enabled)
5983 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5984 else
5985 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5986 }
5988 out:
5989 if (rc)
5990 atomic_dec(&device->scsi_cmds_outstanding);
5992 return rc;
5995 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
5999 unsigned long flags;
6000 unsigned int queued_io_count;
6001 struct pqi_queue_group *queue_group;
6002 struct pqi_io_request *io_request;
6004 queued_io_count = 0;
6006 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6007 queue_group = &ctrl_info->queue_groups[i];
6008 for (path = 0; path < 2; path++) {
6009 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6010 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6011 queued_io_count++;
6012 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6016 return queued_io_count;
6019 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6023 unsigned int nonempty_inbound_queue_count;
6024 struct pqi_queue_group *queue_group;
6028 nonempty_inbound_queue_count = 0;
6030 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6031 queue_group = &ctrl_info->queue_groups[i];
6032 for (path = 0; path < 2; path++) {
6033 iq_pi = queue_group->iq_pi_copy[path];
6034 iq_ci = readl(queue_group->iq_ci[path]);
6035 if (iq_ci != iq_pi)
6036 nonempty_inbound_queue_count++;
6040 return nonempty_inbound_queue_count;
6043 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6045 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6047 unsigned long start_jiffies;
6048 unsigned long warning_timeout;
6049 unsigned int queued_io_count;
6050 unsigned int nonempty_inbound_queue_count;
6051 bool displayed_warning;
6053 displayed_warning = false;
6054 start_jiffies = jiffies;
6055 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6057 while (1) {
6058 queued_io_count = pqi_queued_io_count(ctrl_info);
6059 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6060 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6061 break;
6062 pqi_check_ctrl_health(ctrl_info);
6063 if (pqi_ctrl_offline(ctrl_info))
6064 return -ENXIO;
6065 if (time_after(jiffies, warning_timeout)) {
6066 dev_warn(&ctrl_info->pci_dev->dev,
6067 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6068 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6069 displayed_warning = true;
6070 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6072 usleep_range(1000, 2000);
6073 }
6075 if (displayed_warning)
6076 dev_warn(&ctrl_info->pci_dev->dev,
6077 "queued I/O drained after waiting for %u seconds\n",
6078 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6080 return 0;
6083 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6084 struct pqi_scsi_dev *device)
6088 struct pqi_queue_group *queue_group;
6089 unsigned long flags;
6090 struct pqi_io_request *io_request;
6091 struct pqi_io_request *next;
6092 struct scsi_cmnd *scmd;
6093 struct pqi_scsi_dev *scsi_device;
6095 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6096 queue_group = &ctrl_info->queue_groups[i];
6098 for (path = 0; path < 2; path++) {
6099 spin_lock_irqsave(
6100 &queue_group->submit_lock[path], flags);
6102 list_for_each_entry_safe(io_request, next,
6103 &queue_group->request_list[path],
6104 request_list_entry) {
6106 scmd = io_request->scmd;
6110 scsi_device = scmd->device->hostdata;
6111 if (scsi_device != device)
6112 continue;
6114 list_del(&io_request->request_list_entry);
6115 set_host_byte(scmd, DID_RESET);
6116 pqi_free_io_request(io_request);
6117 scsi_dma_unmap(scmd);
6118 pqi_scsi_done(scmd);
6121 spin_unlock_irqrestore(
6122 &queue_group->submit_lock[path], flags);
6127 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
6129 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6130 struct pqi_scsi_dev *device, unsigned long timeout_msecs)
6132 int cmds_outstanding;
6133 unsigned long start_jiffies;
6134 unsigned long warning_timeout;
6135 unsigned long msecs_waiting;
6137 start_jiffies = jiffies;
6138 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6140 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
6141 pqi_check_ctrl_health(ctrl_info);
6142 if (pqi_ctrl_offline(ctrl_info))
6143 return -ENXIO;
6144 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6145 if (msecs_waiting >= timeout_msecs) {
6146 dev_err(&ctrl_info->pci_dev->dev,
6147 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6148 ctrl_info->scsi_host->host_no, device->bus, device->target,
6149 device->lun, msecs_waiting / 1000, cmds_outstanding);
6150 return -ETIMEDOUT;
6151 }
6152 if (time_after(jiffies, warning_timeout)) {
6153 dev_warn(&ctrl_info->pci_dev->dev,
6154 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6155 ctrl_info->scsi_host->host_no, device->bus, device->target,
6156 device->lun, msecs_waiting / 1000, cmds_outstanding);
6157 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6159 usleep_range(1000, 2000);
6165 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6166 void *context)
6168 struct completion *waiting = context;
6170 complete(waiting);
6173 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
6175 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6176 struct pqi_scsi_dev *device, struct completion *wait)
6179 unsigned int wait_secs;
6180 int cmds_outstanding;
6182 wait_secs = 0;
6184 while (1) {
6185 if (wait_for_completion_io_timeout(wait,
6186 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6187 rc = 0;
6188 break;
6189 }
6191 pqi_check_ctrl_health(ctrl_info);
6192 if (pqi_ctrl_offline(ctrl_info)) {
6193 rc = -ENXIO;
6194 break;
6195 }
6197 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6198 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
6199 dev_warn(&ctrl_info->pci_dev->dev,
6200 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6201 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
6202 }
6204 return rc;
6207 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6209 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6212 struct pqi_io_request *io_request;
6213 DECLARE_COMPLETION_ONSTACK(wait);
6214 struct pqi_task_management_request *request;
6216 io_request = pqi_alloc_io_request(ctrl_info);
6217 io_request->io_complete_callback = pqi_lun_reset_complete;
6218 io_request->context = &wait;
6220 request = io_request->iu;
6221 memset(request, 0, sizeof(*request));
6223 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6224 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6225 &request->header.iu_length);
6226 put_unaligned_le16(io_request->index, &request->request_id);
6227 memcpy(request->lun_number, device->scsi3addr,
6228 sizeof(request->lun_number));
6229 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6230 if (ctrl_info->tmf_iu_timeout_supported)
6231 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6233 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6236 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
6237 if (rc == 0)
6238 rc = io_request->status;
6240 pqi_free_io_request(io_request);
6245 #define PQI_LUN_RESET_RETRIES 3
6246 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6247 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6248 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
6250 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6254 unsigned int retries;
6255 unsigned long timeout_msecs;
6257 for (retries = 0;;) {
6258 reset_rc = pqi_lun_reset(ctrl_info, device);
6259 if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
6260 break;
6261 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6262 }
6264 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6265 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6267 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
6268 if (wait_rc && reset_rc == 0)
6269 reset_rc = wait_rc;
6271 return reset_rc == 0 ? SUCCESS : FAILED;
6274 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
6275 struct pqi_scsi_dev *device)
6279 pqi_ctrl_block_requests(ctrl_info);
6280 pqi_ctrl_wait_until_quiesced(ctrl_info);
6281 pqi_fail_io_queued_for_device(ctrl_info, device);
6282 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6283 if (rc)
6284 rc = FAILED;
6285 else
6286 rc = pqi_lun_reset_with_retries(ctrl_info, device);
6287 pqi_ctrl_unblock_requests(ctrl_info);
6292 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6293 {
6294 int rc;
6295 struct Scsi_Host *shost;
6296 struct pqi_ctrl_info *ctrl_info;
6297 struct pqi_scsi_dev *device;
6299 shost = scmd->device->host;
6300 ctrl_info = shost_to_hba(shost);
6301 device = scmd->device->hostdata;
6303 mutex_lock(&ctrl_info->lun_reset_mutex);
6305 dev_err(&ctrl_info->pci_dev->dev,
6306 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6307 shost->host_no,
6308 device->bus, device->target, device->lun,
6309 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6311 pqi_check_ctrl_health(ctrl_info);
6312 if (pqi_ctrl_offline(ctrl_info))
6313 rc = FAILED;
6314 else
6315 rc = pqi_device_reset(ctrl_info, device);
6317 dev_err(&ctrl_info->pci_dev->dev,
6318 "reset of scsi %d:%d:%d:%d: %s\n",
6319 shost->host_no, device->bus, device->target, device->lun,
6320 rc == SUCCESS ? "SUCCESS" : "FAILED");
6322 mutex_unlock(&ctrl_info->lun_reset_mutex);
6324 return rc;
6325 }
6327 static int pqi_slave_alloc(struct scsi_device *sdev)
6328 {
6329 struct pqi_scsi_dev *device;
6330 unsigned long flags;
6331 struct pqi_ctrl_info *ctrl_info;
6332 struct scsi_target *starget;
6333 struct sas_rphy *rphy;
6335 ctrl_info = shost_to_hba(sdev->host);
6337 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6339 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6340 starget = scsi_target(sdev);
6341 rphy = target_to_rphy(starget);
6342 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6343 if (device) {
6344 if (device->target_lun_valid) {
6345 device->ignore_device = true;
6346 } else {
6347 device->target = sdev_id(sdev);
6348 device->lun = sdev->lun;
6349 device->target_lun_valid = true;
6350 }
6351 }
6352 } else {
6353 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6354 sdev_id(sdev), sdev->lun);
6355 }
6357 if (device) {
6358 sdev->hostdata = device;
6359 device->sdev = sdev;
6360 if (device->queue_depth) {
6361 device->advertised_queue_depth = device->queue_depth;
6362 scsi_change_queue_depth(sdev,
6363 device->advertised_queue_depth);
6364 }
6365 if (pqi_is_logical_device(device)) {
6366 pqi_disable_write_same(sdev);
6367 } else {
6368 sdev->allow_restart = 1;
6369 if (device->device_type == SA_DEVICE_TYPE_NVME)
6370 pqi_disable_write_same(sdev);
6371 }
6372 }
6374 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6376 return 0;
6377 }
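/*
 * Note on the two lookup paths above: devices on PQI_PHYSICAL_DEVICE_BUS
 * are matched through their SAS transport rphy, and the SML-assigned
 * target/lun is only recorded on the first allocation (target_lun_valid);
 * logical devices are found directly by the channel/target/lun triple.
 */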
6379 static int pqi_map_queues(struct Scsi_Host *shost)
6380 {
6381 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6383 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6384 ctrl_info->pci_dev, 0);
6385 }
6387 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6388 {
6389 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6390 }
6392 static int pqi_slave_configure(struct scsi_device *sdev)
6393 {
6394 int rc = 0;
6395 struct pqi_scsi_dev *device;
6397 device = sdev->hostdata;
6398 device->devtype = sdev->type;
6400 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6401 rc = -ENXIO;
6402 device->ignore_device = false;
6403 }
6405 return rc;
6406 }
6408 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6409 {
6410 struct pci_dev *pci_dev;
6411 u32 subsystem_vendor;
6412 u32 subsystem_device;
6413 cciss_pci_info_struct pciinfo;
6415 if (!arg)
6416 return -EINVAL;
6418 pci_dev = ctrl_info->pci_dev;
6420 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6421 pciinfo.bus = pci_dev->bus->number;
6422 pciinfo.dev_fn = pci_dev->devfn;
6423 subsystem_vendor = pci_dev->subsystem_vendor;
6424 subsystem_device = pci_dev->subsystem_device;
6425 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6427 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6428 return -EFAULT;
6430 return 0;
6431 }
6433 static int pqi_getdrivver_ioctl(void __user *arg)
6434 {
6435 u32 version;
6437 if (!arg)
6438 return -EINVAL;
6440 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6441 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6443 if (copy_to_user(arg, &version, sizeof(version)))
6444 return -EFAULT;
6446 return 0;
6447 }
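/*
 * Worked packing example for the two ioctls above (illustrative values):
 * with subsystem_vendor = 0x9005 and subsystem_device = 0x028f, board_id
 * becomes 0x028f9005; and for this driver version (2.1.14-035),
 *
 *	version = (2 << 28) | (1 << 24) | (14 << 16) | 35 = 0x210e0023
 */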
6449 struct ciss_error_info {
6450 u8 scsi_status;
6451 int command_status;
6452 size_t sense_data_length;
6453 };
6455 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6456 struct ciss_error_info *ciss_error_info)
6457 {
6458 int ciss_cmd_status;
6459 size_t sense_data_length;
6461 switch (pqi_error_info->data_out_result) {
6462 case PQI_DATA_IN_OUT_GOOD:
6463 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6464 break;
6465 case PQI_DATA_IN_OUT_UNDERFLOW:
6466 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6467 break;
6468 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6469 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6470 break;
6471 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6472 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6473 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6474 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6475 case PQI_DATA_IN_OUT_ERROR:
6476 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6477 break;
6478 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6479 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6480 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6481 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6482 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6483 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6484 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6485 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6486 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6487 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6488 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6489 break;
6490 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6491 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6492 break;
6493 case PQI_DATA_IN_OUT_ABORTED:
6494 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6495 break;
6496 case PQI_DATA_IN_OUT_TIMEOUT:
6497 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6498 break;
6499 default:
6500 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6501 break;
6502 }
6504 sense_data_length =
6505 get_unaligned_le16(&pqi_error_info->sense_data_length);
6506 if (sense_data_length == 0)
6507 sense_data_length =
6508 get_unaligned_le16(&pqi_error_info->response_data_length);
6509 if (sense_data_length)
6510 if (sense_data_length > sizeof(pqi_error_info->data))
6511 sense_data_length = sizeof(pqi_error_info->data);
6513 ciss_error_info->scsi_status = pqi_error_info->status;
6514 ciss_error_info->command_status = ciss_cmd_status;
6515 ciss_error_info->sense_data_length = sense_data_length;
6516 }
6518 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6519 {
6520 int rc;
6521 char *kernel_buffer = NULL;
6522 u16 iu_length;
6523 size_t sense_data_length;
6524 IOCTL_Command_struct iocommand;
6525 struct pqi_raid_path_request request;
6526 struct pqi_raid_error_info pqi_error_info;
6527 struct ciss_error_info ciss_error_info;
6529 if (pqi_ctrl_offline(ctrl_info))
6530 return -ENXIO;
6531 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6532 return -EBUSY;
6533 if (!arg)
6534 return -EINVAL;
6535 if (!capable(CAP_SYS_RAWIO))
6536 return -EPERM;
6537 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6538 return -EFAULT;
6539 if (iocommand.buf_size < 1 &&
6540 iocommand.Request.Type.Direction != XFER_NONE)
6541 return -EINVAL;
6542 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6543 return -EINVAL;
6544 if (iocommand.Request.Type.Type != TYPE_CMD)
6545 return -EINVAL;
6547 switch (iocommand.Request.Type.Direction) {
6548 case XFER_NONE:
6549 case XFER_WRITE:
6550 case XFER_READ:
6551 case XFER_READ | XFER_WRITE:
6552 break;
6553 default:
6554 return -EINVAL;
6555 }
6557 if (iocommand.buf_size > 0) {
6558 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6559 if (!kernel_buffer)
6560 return -ENOMEM;
6561 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6562 if (copy_from_user(kernel_buffer, iocommand.buf,
6563 iocommand.buf_size)) {
6564 rc = -EFAULT;
6565 goto out;
6566 }
6567 } else {
6568 memset(kernel_buffer, 0, iocommand.buf_size);
6569 }
6570 }
6572 memset(&request, 0, sizeof(request));
6574 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6575 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6576 PQI_REQUEST_HEADER_LENGTH;
6577 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6578 sizeof(request.lun_number));
6579 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6580 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6582 switch (iocommand.Request.Type.Direction) {
6583 case XFER_NONE:
6584 request.data_direction = SOP_NO_DIRECTION_FLAG;
6585 break;
6586 case XFER_WRITE:
6587 request.data_direction = SOP_WRITE_FLAG;
6588 break;
6589 case XFER_READ:
6590 request.data_direction = SOP_READ_FLAG;
6591 break;
6592 case XFER_READ | XFER_WRITE:
6593 request.data_direction = SOP_BIDIRECTIONAL;
6594 break;
6595 }
6597 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6599 if (iocommand.buf_size > 0) {
6600 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6602 rc = pqi_map_single(ctrl_info->pci_dev,
6603 &request.sg_descriptors[0], kernel_buffer,
6604 iocommand.buf_size, DMA_BIDIRECTIONAL);
6605 if (rc)
6606 goto out;
6608 iu_length += sizeof(request.sg_descriptors[0]);
6609 }
6611 put_unaligned_le16(iu_length, &request.header.iu_length);
6613 if (ctrl_info->raid_iu_timeout_supported)
6614 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6616 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6617 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6619 if (iocommand.buf_size > 0)
6620 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6621 DMA_BIDIRECTIONAL);
6623 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6625 if (rc == 0) {
6626 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6627 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6628 iocommand.error_info.CommandStatus =
6629 ciss_error_info.command_status;
6630 sense_data_length = ciss_error_info.sense_data_length;
6631 if (sense_data_length) {
6632 if (sense_data_length >
6633 sizeof(iocommand.error_info.SenseInfo))
6634 sense_data_length =
6635 sizeof(iocommand.error_info.SenseInfo);
6636 memcpy(iocommand.error_info.SenseInfo,
6637 pqi_error_info.data, sense_data_length);
6638 iocommand.error_info.SenseLen = sense_data_length;
6639 }
6640 }
6642 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6643 rc = -EFAULT;
6644 goto out;
6645 }
6647 if (rc == 0 && iocommand.buf_size > 0 &&
6648 (iocommand.Request.Type.Direction & XFER_READ)) {
6649 if (copy_to_user(iocommand.buf, kernel_buffer,
6650 iocommand.buf_size)) {
6651 rc = -EFAULT;
6652 }
6653 }
6655 out:
6656 kfree(kernel_buffer);
6658 return rc;
6659 }
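/*
 * Minimal userspace usage sketch for CCISS_PASSTHRU (illustrative only;
 * the device node name is an assumption -- any block node backed by this
 * host should work). Issues a 6-byte TEST UNIT READY with no data phase:
 *
 *	IOCTL_Command_struct ioc;
 *
 *	memset(&ioc, 0, sizeof(ioc));
 *	ioc.Request.CDBLen = 6;
 *	ioc.Request.Type.Type = TYPE_CMD;
 *	ioc.Request.Type.Direction = XFER_NONE;
 *	ioc.Request.CDB[0] = 0x00;		// TEST UNIT READY
 *	ioc.buf_size = 0;
 *
 *	int fd = open("/dev/sda", O_RDWR);	// assumed node
 *	int rc = ioctl(fd, CCISS_PASSTHRU, &ioc);
 *
 * On success, ioc.error_info carries the translated CISS status and any
 * sense data, as filled in above.
 */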
6661 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6662 void __user *arg)
6663 {
6664 int rc;
6665 struct pqi_ctrl_info *ctrl_info;
6667 ctrl_info = shost_to_hba(sdev->host);
6669 switch (cmd) {
6670 case CCISS_DEREGDISK:
6671 case CCISS_REGNEWDISK:
6672 case CCISS_REGNEWD:
6673 rc = pqi_scan_scsi_devices(ctrl_info);
6674 break;
6675 case CCISS_GETPCIINFO:
6676 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6677 break;
6678 case CCISS_GETDRIVVER:
6679 rc = pqi_getdrivver_ioctl(arg);
6680 break;
6681 case CCISS_PASSTHRU:
6682 rc = pqi_passthru_ioctl(ctrl_info, arg);
6683 break;
6684 default:
6685 rc = -EINVAL;
6686 break;
6687 }
6689 return rc;
6690 }
6692 static ssize_t pqi_firmware_version_show(struct device *dev,
6693 struct device_attribute *attr, char *buffer)
6694 {
6695 struct Scsi_Host *shost;
6696 struct pqi_ctrl_info *ctrl_info;
6698 shost = class_to_shost(dev);
6699 ctrl_info = shost_to_hba(shost);
6701 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6702 }
6704 static ssize_t pqi_driver_version_show(struct device *dev,
6705 struct device_attribute *attr, char *buffer)
6706 {
6707 return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6708 }
6710 static ssize_t pqi_serial_number_show(struct device *dev,
6711 struct device_attribute *attr, char *buffer)
6712 {
6713 struct Scsi_Host *shost;
6714 struct pqi_ctrl_info *ctrl_info;
6716 shost = class_to_shost(dev);
6717 ctrl_info = shost_to_hba(shost);
6719 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6720 }
6722 static ssize_t pqi_model_show(struct device *dev,
6723 struct device_attribute *attr, char *buffer)
6724 {
6725 struct Scsi_Host *shost;
6726 struct pqi_ctrl_info *ctrl_info;
6728 shost = class_to_shost(dev);
6729 ctrl_info = shost_to_hba(shost);
6731 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6732 }
6734 static ssize_t pqi_vendor_show(struct device *dev,
6735 struct device_attribute *attr, char *buffer)
6736 {
6737 struct Scsi_Host *shost;
6738 struct pqi_ctrl_info *ctrl_info;
6740 shost = class_to_shost(dev);
6741 ctrl_info = shost_to_hba(shost);
6743 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6744 }
6746 static ssize_t pqi_host_rescan_store(struct device *dev,
6747 struct device_attribute *attr, const char *buffer, size_t count)
6748 {
6749 struct Scsi_Host *shost = class_to_shost(dev);
6751 pqi_scan_start(shost);
6753 return count;
6754 }
6756 static ssize_t pqi_lockup_action_show(struct device *dev,
6757 struct device_attribute *attr, char *buffer)
6758 {
6759 int count = 0;
6760 unsigned int i;
6762 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6763 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6764 count += scnprintf(buffer + count, PAGE_SIZE - count,
6765 "[%s] ", pqi_lockup_actions[i].name);
6766 else
6767 count += scnprintf(buffer + count, PAGE_SIZE - count,
6768 "%s ", pqi_lockup_actions[i].name);
6769 }
6771 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6773 return count;
6774 }
6776 static ssize_t pqi_lockup_action_store(struct device *dev,
6777 struct device_attribute *attr, const char *buffer, size_t count)
6778 {
6779 unsigned int i;
6780 char *action_name;
6781 char action_name_buffer[32];
6783 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6784 action_name = strstrip(action_name_buffer);
6786 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6787 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6788 pqi_lockup_action = pqi_lockup_actions[i].action;
6789 return count;
6790 }
6791 }
6793 return -EINVAL;
6794 }
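/*
 * Sysfs usage sketch (paths and action names are illustrative and may
 * vary): reading lockup_action lists all actions with the current one
 * bracketed, e.g. "[none] reboot panic"; writing one of the listed
 * names, e.g. "reboot", to /sys/class/scsi_host/host<N>/lockup_action
 * selects it.
 */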
6796 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6797 struct device_attribute *attr, char *buffer)
6798 {
6799 struct Scsi_Host *shost = class_to_shost(dev);
6800 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6802 return scnprintf(buffer, 10, "%x\n",
6803 ctrl_info->enable_stream_detection);
6804 }
6806 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6807 struct device_attribute *attr, const char *buffer, size_t count)
6808 {
6809 struct Scsi_Host *shost = class_to_shost(dev);
6810 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6811 u8 set_stream_detection = 0;
6813 if (kstrtou8(buffer, 0, &set_stream_detection))
6814 return -EINVAL;
6816 if (set_stream_detection > 0)
6817 set_stream_detection = 1;
6819 ctrl_info->enable_stream_detection = set_stream_detection;
6821 return count;
6822 }
6824 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6825 struct device_attribute *attr, char *buffer)
6826 {
6827 struct Scsi_Host *shost = class_to_shost(dev);
6828 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6830 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6831 }
6833 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6834 struct device_attribute *attr, const char *buffer, size_t count)
6835 {
6836 struct Scsi_Host *shost = class_to_shost(dev);
6837 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6838 u8 set_r5_writes = 0;
6840 if (kstrtou8(buffer, 0, &set_r5_writes))
6841 return -EINVAL;
6843 if (set_r5_writes > 0)
6844 set_r5_writes = 1;
6846 ctrl_info->enable_r5_writes = set_r5_writes;
6848 return count;
6849 }
6851 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6852 struct device_attribute *attr, char *buffer)
6853 {
6854 struct Scsi_Host *shost = class_to_shost(dev);
6855 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6857 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6858 }
6860 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6861 struct device_attribute *attr, const char *buffer, size_t count)
6862 {
6863 struct Scsi_Host *shost = class_to_shost(dev);
6864 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6865 u8 set_r6_writes = 0;
6867 if (kstrtou8(buffer, 0, &set_r6_writes))
6868 return -EINVAL;
6870 if (set_r6_writes > 0)
6871 set_r6_writes = 1;
6873 ctrl_info->enable_r6_writes = set_r6_writes;
6875 return count;
6876 }
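/*
 * All three enable_* stores above parse the value with kstrtou8() using
 * base 0, so "0", "1", "0x1", and "010" are all accepted, and any
 * nonzero value is normalized to 1 before being stored.
 */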
6878 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6879 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6880 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6881 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6882 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6883 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6884 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6885 pqi_lockup_action_store);
6886 static DEVICE_ATTR(enable_stream_detection, 0644,
6887 pqi_host_enable_stream_detection_show,
6888 pqi_host_enable_stream_detection_store);
6889 static DEVICE_ATTR(enable_r5_writes, 0644,
6890 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6891 static DEVICE_ATTR(enable_r6_writes, 0644,
6892 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6894 static struct attribute *pqi_shost_attrs[] = {
6895 &dev_attr_driver_version.attr,
6896 &dev_attr_firmware_version.attr,
6897 &dev_attr_model.attr,
6898 &dev_attr_serial_number.attr,
6899 &dev_attr_vendor.attr,
6900 &dev_attr_rescan.attr,
6901 &dev_attr_lockup_action.attr,
6902 &dev_attr_enable_stream_detection.attr,
6903 &dev_attr_enable_r5_writes.attr,
6904 &dev_attr_enable_r6_writes.attr,
6905 NULL
6906 };
6908 ATTRIBUTE_GROUPS(pqi_shost);
6910 static ssize_t pqi_unique_id_show(struct device *dev,
6911 struct device_attribute *attr, char *buffer)
6912 {
6913 struct pqi_ctrl_info *ctrl_info;
6914 struct scsi_device *sdev;
6915 struct pqi_scsi_dev *device;
6916 unsigned long flags;
6917 u8 unique_id[16];
6919 sdev = to_scsi_device(dev);
6920 ctrl_info = shost_to_hba(sdev->host);
6922 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6924 device = sdev->hostdata;
6925 if (!device) {
6926 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6927 return -ENODEV;
6928 }
6930 if (device->is_physical_device)
6931 memcpy(unique_id, device->wwid, sizeof(device->wwid));
6932 else
6933 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6935 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6937 return scnprintf(buffer, PAGE_SIZE,
6938 "%02X%02X%02X%02X%02X%02X%02X%02X"
6939 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
6940 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6941 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6942 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6943 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6944 }
6946 static ssize_t pqi_lunid_show(struct device *dev,
6947 struct device_attribute *attr, char *buffer)
6948 {
6949 struct pqi_ctrl_info *ctrl_info;
6950 struct scsi_device *sdev;
6951 struct pqi_scsi_dev *device;
6952 unsigned long flags;
6953 u8 lunid[8];
6955 sdev = to_scsi_device(dev);
6956 ctrl_info = shost_to_hba(sdev->host);
6958 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6960 device = sdev->hostdata;
6961 if (!device) {
6962 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6963 return -ENODEV;
6964 }
6966 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6968 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6970 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6971 }
6975 static ssize_t pqi_path_info_show(struct device *dev,
6976 struct device_attribute *attr, char *buf)
6977 {
6978 struct pqi_ctrl_info *ctrl_info;
6979 struct scsi_device *sdev;
6980 struct pqi_scsi_dev *device;
6981 unsigned long flags;
6982 int i;
6983 int output_len = 0;
6984 u8 box;
6985 u8 bay;
6986 u8 path_map_index;
6987 char *active;
6988 u8 phys_connector[2];
6990 sdev = to_scsi_device(dev);
6991 ctrl_info = shost_to_hba(sdev->host);
6993 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6995 device = sdev->hostdata;
6996 if (!device) {
6997 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6998 return -ENODEV;
6999 }
7001 bay = device->bay;
7002 for (i = 0; i < MAX_PATHS; i++) {
7003 path_map_index = 1 << i;
7004 if (i == device->active_path_index)
7005 active = "Active";
7006 else if (device->path_map & path_map_index)
7007 active = "Inactive";
7008 else
7009 continue;
7011 output_len += scnprintf(buf + output_len,
7012 PAGE_SIZE - output_len,
7013 "[%d:%d:%d:%d] %20.20s ",
7014 ctrl_info->scsi_host->host_no,
7015 device->bus, device->target,
7016 device->lun,
7017 scsi_device_type(device->devtype));
7019 if (device->devtype == TYPE_RAID ||
7020 pqi_is_logical_device(device))
7021 goto end_buffer;
7023 memcpy(&phys_connector, &device->phys_connector[i],
7024 sizeof(phys_connector));
7025 if (phys_connector[0] < '0')
7026 phys_connector[0] = '0';
7027 if (phys_connector[1] < '0')
7028 phys_connector[1] = '0';
7030 output_len += scnprintf(buf + output_len,
7031 PAGE_SIZE - output_len,
7032 "PORT: %.2s ", phys_connector);
7034 box = device->box[i];
7035 if (box != 0 && box != 0xFF)
7036 output_len += scnprintf(buf + output_len,
7037 PAGE_SIZE - output_len,
7038 "BOX: %hhu ", box);
7040 if ((device->devtype == TYPE_DISK ||
7041 device->devtype == TYPE_ZBC) &&
7042 pqi_expose_device(device))
7043 output_len += scnprintf(buf + output_len,
7044 PAGE_SIZE - output_len,
7045 "BAY: %hhu ", bay);
7047 end_buffer:
7048 output_len += scnprintf(buf + output_len,
7049 PAGE_SIZE - output_len,
7050 "%s\n", active);
7051 }
7053 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7055 return output_len;
7056 }
7058 static ssize_t pqi_sas_address_show(struct device *dev,
7059 struct device_attribute *attr, char *buffer)
7060 {
7061 struct pqi_ctrl_info *ctrl_info;
7062 struct scsi_device *sdev;
7063 struct pqi_scsi_dev *device;
7064 unsigned long flags;
7065 u64 sas_address;
7067 sdev = to_scsi_device(dev);
7068 ctrl_info = shost_to_hba(sdev->host);
7070 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7072 device = sdev->hostdata;
7073 if (!device || !pqi_is_device_with_sas_address(device)) {
7074 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7075 return -ENODEV;
7076 }
7078 sas_address = device->sas_address;
7080 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7082 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7083 }
7085 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7086 struct device_attribute *attr, char *buffer)
7087 {
7088 struct pqi_ctrl_info *ctrl_info;
7089 struct scsi_device *sdev;
7090 struct pqi_scsi_dev *device;
7091 unsigned long flags;
7093 sdev = to_scsi_device(dev);
7094 ctrl_info = shost_to_hba(sdev->host);
7096 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7098 device = sdev->hostdata;
7099 if (!device) {
7100 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7101 return -ENODEV;
7102 }
7104 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7105 buffer[1] = '\n';
7106 buffer[2] = '\0';
7108 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7110 return 2;
7111 }
7113 static ssize_t pqi_raid_level_show(struct device *dev,
7114 struct device_attribute *attr, char *buffer)
7115 {
7116 struct pqi_ctrl_info *ctrl_info;
7117 struct scsi_device *sdev;
7118 struct pqi_scsi_dev *device;
7119 unsigned long flags;
7120 char *raid_level;
7122 sdev = to_scsi_device(dev);
7123 ctrl_info = shost_to_hba(sdev->host);
7125 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7127 device = sdev->hostdata;
7128 if (!device) {
7129 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7130 return -ENODEV;
7131 }
7133 if (pqi_is_logical_device(device))
7134 raid_level = pqi_raid_level_to_string(device->raid_level);
7135 else
7136 raid_level = "N/A";
7138 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7140 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7141 }
7143 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7144 struct device_attribute *attr, char *buffer)
7145 {
7146 struct pqi_ctrl_info *ctrl_info;
7147 struct scsi_device *sdev;
7148 struct pqi_scsi_dev *device;
7149 unsigned long flags;
7150 int raid_bypass_cnt;
7152 sdev = to_scsi_device(dev);
7153 ctrl_info = shost_to_hba(sdev->host);
7155 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7157 device = sdev->hostdata;
7158 if (!device) {
7159 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7160 return -ENODEV;
7161 }
7163 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
7165 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7167 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
7168 }
7170 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7171 struct device_attribute *attr, char *buf)
7172 {
7173 struct pqi_ctrl_info *ctrl_info;
7174 struct scsi_device *sdev;
7175 struct pqi_scsi_dev *device;
7176 unsigned long flags;
7177 int output_len = 0;
7179 sdev = to_scsi_device(dev);
7180 ctrl_info = shost_to_hba(sdev->host);
7182 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7184 device = sdev->hostdata;
7185 if (!device) {
7186 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7187 return -ENODEV;
7188 }
7190 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7191 device->ncq_prio_enable);
7192 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7194 return output_len;
7195 }
7197 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7198 struct device_attribute *attr,
7199 const char *buf, size_t count)
7200 {
7201 struct pqi_ctrl_info *ctrl_info;
7202 struct scsi_device *sdev;
7203 struct pqi_scsi_dev *device;
7204 unsigned long flags;
7205 u8 ncq_prio_enable = 0;
7207 if (kstrtou8(buf, 0, &ncq_prio_enable))
7208 return -EINVAL;
7210 sdev = to_scsi_device(dev);
7211 ctrl_info = shost_to_hba(sdev->host);
7213 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7215 device = sdev->hostdata;
7217 if (!device) {
7218 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7219 return -ENODEV;
7220 }
7222 if (!device->ncq_prio_support ||
7223 !device->is_physical_device) {
7224 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7225 return -EINVAL;
7226 }
7228 device->ncq_prio_enable = ncq_prio_enable;
7230 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7232 return strlen(buf);
7233 }
7235 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7236 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7237 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7238 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7239 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7240 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7241 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7242 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7243 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7245 static struct attribute *pqi_sdev_attrs[] = {
7246 &dev_attr_lunid.attr,
7247 &dev_attr_unique_id.attr,
7248 &dev_attr_path_info.attr,
7249 &dev_attr_sas_address.attr,
7250 &dev_attr_ssd_smart_path_enabled.attr,
7251 &dev_attr_raid_level.attr,
7252 &dev_attr_raid_bypass_cnt.attr,
7253 &dev_attr_sas_ncq_prio_enable.attr,
7254 NULL
7255 };
7257 ATTRIBUTE_GROUPS(pqi_sdev);
7259 static struct scsi_host_template pqi_driver_template = {
7260 .module = THIS_MODULE,
7261 .name = DRIVER_NAME_SHORT,
7262 .proc_name = DRIVER_NAME_SHORT,
7263 .queuecommand = pqi_scsi_queue_command,
7264 .scan_start = pqi_scan_start,
7265 .scan_finished = pqi_scan_finished,
7266 .this_id = -1,
7267 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7268 .ioctl = pqi_ioctl,
7269 .slave_alloc = pqi_slave_alloc,
7270 .slave_configure = pqi_slave_configure,
7271 .map_queues = pqi_map_queues,
7272 .sdev_groups = pqi_sdev_groups,
7273 .shost_groups = pqi_shost_groups,
7274 .cmd_size = sizeof(struct pqi_cmd_priv),
7275 };
7277 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7278 {
7279 int rc;
7280 struct Scsi_Host *shost;
7282 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7283 if (!shost) {
7284 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7285 return -ENOMEM;
7286 }
7288 shost->io_port = 0;
7289 shost->n_io_port = 0;
7290 shost->this_id = -1;
7291 shost->max_channel = PQI_MAX_BUS;
7292 shost->max_cmd_len = MAX_COMMAND_SIZE;
7293 shost->max_lun = ~0;
7294 shost->max_id = ~0;
7295 shost->max_sectors = ctrl_info->max_sectors;
7296 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7297 shost->cmd_per_lun = shost->can_queue;
7298 shost->sg_tablesize = ctrl_info->sg_tablesize;
7299 shost->transportt = pqi_sas_transport_template;
7300 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7301 shost->unique_id = shost->irq;
7302 shost->nr_hw_queues = ctrl_info->num_queue_groups;
7303 shost->host_tagset = 1;
7304 shost->hostdata[0] = (unsigned long)ctrl_info;
7306 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7307 if (rc) {
7308 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7309 goto free_host;
7310 }
7312 rc = pqi_add_sas_host(shost, ctrl_info);
7313 if (rc) {
7314 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7315 goto remove_host;
7316 }
7318 ctrl_info->scsi_host = shost;
7320 return 0;
7322 remove_host:
7323 scsi_remove_host(shost);
7324 free_host:
7325 scsi_host_put(shost);
7327 return rc;
7328 }
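/*
 * Note: scsi_host_alloc() is passed sizeof(ctrl_info) -- the size of the
 * pointer, not of the structure -- so the host's private hostdata area
 * only needs to hold the single pointer stored in hostdata[0] above;
 * shost_to_hba() presumably reads it back from there.
 */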
7330 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7331 {
7332 struct Scsi_Host *shost;
7334 pqi_delete_sas_host(ctrl_info);
7336 shost = ctrl_info->scsi_host;
7337 if (!shost)
7338 return;
7340 scsi_remove_host(shost);
7341 scsi_host_put(shost);
7342 }
7344 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7345 {
7346 int rc = 0;
7347 struct pqi_device_registers __iomem *pqi_registers;
7348 unsigned long timeout;
7349 unsigned int timeout_msecs;
7350 union pqi_reset_register reset_reg;
7352 pqi_registers = ctrl_info->pqi_registers;
7353 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7354 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7356 while (1) {
7357 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7358 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7359 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7360 break;
7361 pqi_check_ctrl_health(ctrl_info);
7362 if (pqi_ctrl_offline(ctrl_info)) {
7363 rc = -ENXIO;
7364 break;
7365 }
7366 if (time_after(jiffies, timeout)) {
7367 rc = -ETIMEDOUT;
7368 break;
7369 }
7370 }
7372 return rc;
7373 }
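/*
 * The max_reset_timeout register is evidently in 100 ms units, since the
 * code above multiplies it by 100 to obtain milliseconds: a register
 * value of 50, for example, yields a 5000 ms (5 second) polling deadline.
 */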
7375 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7376 {
7377 int rc;
7378 union pqi_reset_register reset_reg;
7380 if (ctrl_info->pqi_reset_quiesce_supported) {
7381 rc = sis_pqi_reset_quiesce(ctrl_info);
7382 if (rc) {
7383 dev_err(&ctrl_info->pci_dev->dev,
7384 "PQI reset failed during quiesce with error %d\n", rc);
7385 return rc;
7386 }
7387 }
7389 reset_reg.all_bits = 0;
7390 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7391 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7393 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7395 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7396 if (rc)
7397 dev_err(&ctrl_info->pci_dev->dev,
7398 "PQI reset failed with error %d\n", rc);
7400 return rc;
7401 }
7403 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7404 {
7405 int rc;
7406 struct bmic_sense_subsystem_info *sense_info;
7408 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7409 if (!sense_info)
7410 return -ENOMEM;
7412 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7413 if (rc)
7414 goto out;
7416 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7417 sizeof(sense_info->ctrl_serial_number));
7418 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7420 out:
7421 kfree(sense_info);
7423 return rc;
7424 }
7426 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7427 {
7428 int rc;
7429 struct bmic_identify_controller *identify;
7431 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7432 if (!identify)
7433 return -ENOMEM;
7435 rc = pqi_identify_controller(ctrl_info, identify);
7436 if (rc)
7437 goto out;
7439 if (get_unaligned_le32(&identify->extra_controller_flags) &
7440 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7441 memcpy(ctrl_info->firmware_version,
7442 identify->firmware_version_long,
7443 sizeof(identify->firmware_version_long));
7444 } else {
7445 memcpy(ctrl_info->firmware_version,
7446 identify->firmware_version_short,
7447 sizeof(identify->firmware_version_short));
7448 ctrl_info->firmware_version
7449 [sizeof(identify->firmware_version_short)] = '\0';
7450 snprintf(ctrl_info->firmware_version +
7451 strlen(ctrl_info->firmware_version),
7452 sizeof(ctrl_info->firmware_version) -
7453 sizeof(identify->firmware_version_short),
7454 "-%u",
7455 get_unaligned_le16(&identify->firmware_build_number));
7456 }
7458 memcpy(ctrl_info->model, identify->product_id,
7459 sizeof(identify->product_id));
7460 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7462 memcpy(ctrl_info->vendor, identify->vendor_id,
7463 sizeof(identify->vendor_id));
7464 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7466 out:
7467 kfree(identify);
7469 return rc;
7470 }
7472 struct pqi_config_table_section_info {
7473 struct pqi_ctrl_info *ctrl_info;
7474 void *section;
7475 u32 section_offset;
7476 void __iomem *section_iomem_addr;
7477 };
7479 static inline bool pqi_is_firmware_feature_supported(
7480 struct pqi_config_table_firmware_features *firmware_features,
7481 unsigned int bit_position)
7482 {
7483 unsigned int byte_index;
7485 byte_index = bit_position / BITS_PER_BYTE;
7487 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7488 return false;
7490 return firmware_features->features_supported[byte_index] &
7491 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7492 }
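/*
 * Worked example for the byte/bit math above: feature bit 13 lands in
 * byte_index = 13 / 8 = 1 and is tested with mask 1 << (13 % 8) = 0x20.
 */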
7494 static inline bool pqi_is_firmware_feature_enabled(
7495 struct pqi_config_table_firmware_features *firmware_features,
7496 void __iomem *firmware_features_iomem_addr,
7497 unsigned int bit_position)
7498 {
7499 unsigned int byte_index;
7500 u8 __iomem *features_enabled_iomem_addr;
7502 byte_index = (bit_position / BITS_PER_BYTE) +
7503 (le16_to_cpu(firmware_features->num_elements) * 2);
7505 features_enabled_iomem_addr = firmware_features_iomem_addr +
7506 offsetof(struct pqi_config_table_firmware_features,
7507 features_supported) + byte_index;
7509 return *((__force u8 *)features_enabled_iomem_addr) &
7510 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7511 }
7513 static inline void pqi_request_firmware_feature(
7514 struct pqi_config_table_firmware_features *firmware_features,
7515 unsigned int bit_position)
7516 {
7517 unsigned int byte_index;
7519 byte_index = (bit_position / BITS_PER_BYTE) +
7520 le16_to_cpu(firmware_features->num_elements);
7522 firmware_features->features_supported[byte_index] |=
7523 (1 << (bit_position % BITS_PER_BYTE));
7524 }
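/*
 * Layout implied by the byte_index arithmetic in the three helpers above:
 * features_supported[] is followed by two further byte arrays of the same
 * num_elements length -- the host-requested bits (offset + num_elements)
 * and the firmware-enabled bits (offset + 2 * num_elements). With
 * num_elements = 2, for example, bit 13 is read from supported byte 1,
 * requested via byte 3, and checked as enabled in byte 5.
 */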
7526 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7527 u16 first_section, u16 last_section)
7528 {
7529 struct pqi_vendor_general_request request;
7531 memset(&request, 0, sizeof(request));
7533 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7534 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7535 &request.header.iu_length);
7536 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7537 &request.function_code);
7538 put_unaligned_le16(first_section,
7539 &request.data.config_table_update.first_section);
7540 put_unaligned_le16(last_section,
7541 &request.data.config_table_update.last_section);
7543 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7544 }
7546 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7547 struct pqi_config_table_firmware_features *firmware_features,
7548 void __iomem *firmware_features_iomem_addr)
7549 {
7550 void *features_requested;
7551 void __iomem *features_requested_iomem_addr;
7552 void __iomem *host_max_known_feature_iomem_addr;
7554 features_requested = firmware_features->features_supported +
7555 le16_to_cpu(firmware_features->num_elements);
7557 features_requested_iomem_addr = firmware_features_iomem_addr +
7558 (features_requested - (void *)firmware_features);
7560 memcpy_toio(features_requested_iomem_addr, features_requested,
7561 le16_to_cpu(firmware_features->num_elements));
7563 if (pqi_is_firmware_feature_supported(firmware_features,
7564 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7565 host_max_known_feature_iomem_addr =
7566 features_requested_iomem_addr +
7567 (le16_to_cpu(firmware_features->num_elements) * 2) +
7568 sizeof(__le16);
7569 writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7570 host_max_known_feature_iomem_addr);
7571 }
7573 return pqi_config_table_update(ctrl_info,
7574 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7575 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7576 }
7578 struct pqi_firmware_feature {
7579 char *feature_name;
7580 unsigned int feature_bit;
7581 bool supported;
7582 bool enabled;
7583 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7584 struct pqi_firmware_feature *firmware_feature);
7585 };
7587 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7588 struct pqi_firmware_feature *firmware_feature)
7589 {
7590 if (!firmware_feature->supported) {
7591 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7592 firmware_feature->feature_name);
7593 return;
7594 }
7596 if (firmware_feature->enabled) {
7597 dev_info(&ctrl_info->pci_dev->dev,
7598 "%s enabled\n", firmware_feature->feature_name);
7599 return;
7600 }
7602 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7603 firmware_feature->feature_name);
7604 }
7606 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7607 struct pqi_firmware_feature *firmware_feature)
7608 {
7609 switch (firmware_feature->feature_bit) {
7610 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7611 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7612 break;
7613 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7614 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7615 break;
7616 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7617 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7618 break;
7619 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7620 ctrl_info->soft_reset_handshake_supported =
7621 firmware_feature->enabled &&
7622 pqi_read_soft_reset_status(ctrl_info);
7623 break;
7624 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7625 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7626 break;
7627 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7628 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7629 break;
7630 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7631 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7632 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7633 break;
7634 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7635 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7636 break;
7637 }
7639 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7640 }
7642 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7643 struct pqi_firmware_feature *firmware_feature)
7644 {
7645 if (firmware_feature->feature_status)
7646 firmware_feature->feature_status(ctrl_info, firmware_feature);
7647 }
7649 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7651 static struct pqi_firmware_feature pqi_firmware_features[] = {
7652 {
7653 .feature_name = "Online Firmware Activation",
7654 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7655 .feature_status = pqi_firmware_feature_status,
7656 },
7657 {
7658 .feature_name = "Serial Management Protocol",
7659 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7660 .feature_status = pqi_firmware_feature_status,
7661 },
7662 {
7663 .feature_name = "Maximum Known Feature",
7664 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7665 .feature_status = pqi_firmware_feature_status,
7666 },
7667 {
7668 .feature_name = "RAID 0 Read Bypass",
7669 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7670 .feature_status = pqi_firmware_feature_status,
7671 },
7672 {
7673 .feature_name = "RAID 1 Read Bypass",
7674 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7675 .feature_status = pqi_firmware_feature_status,
7676 },
7677 {
7678 .feature_name = "RAID 5 Read Bypass",
7679 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7680 .feature_status = pqi_firmware_feature_status,
7681 },
7682 {
7683 .feature_name = "RAID 6 Read Bypass",
7684 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7685 .feature_status = pqi_firmware_feature_status,
7686 },
7687 {
7688 .feature_name = "RAID 0 Write Bypass",
7689 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7690 .feature_status = pqi_firmware_feature_status,
7691 },
7692 {
7693 .feature_name = "RAID 1 Write Bypass",
7694 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7695 .feature_status = pqi_ctrl_update_feature_flags,
7696 },
7697 {
7698 .feature_name = "RAID 5 Write Bypass",
7699 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7700 .feature_status = pqi_ctrl_update_feature_flags,
7701 },
7702 {
7703 .feature_name = "RAID 6 Write Bypass",
7704 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7705 .feature_status = pqi_ctrl_update_feature_flags,
7706 },
7707 {
7708 .feature_name = "New Soft Reset Handshake",
7709 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7710 .feature_status = pqi_ctrl_update_feature_flags,
7711 },
7712 {
7713 .feature_name = "RAID IU Timeout",
7714 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7715 .feature_status = pqi_ctrl_update_feature_flags,
7716 },
7717 {
7718 .feature_name = "TMF IU Timeout",
7719 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7720 .feature_status = pqi_ctrl_update_feature_flags,
7721 },
7722 {
7723 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7724 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7725 .feature_status = pqi_firmware_feature_status,
7726 },
7727 {
7728 .feature_name = "Firmware Triage",
7729 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7730 .feature_status = pqi_ctrl_update_feature_flags,
7731 },
7732 {
7733 .feature_name = "RPL Extended Formats 4 and 5",
7734 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7735 .feature_status = pqi_ctrl_update_feature_flags,
7736 },
7737 };
7739 static void pqi_process_firmware_features(
7740 struct pqi_config_table_section_info *section_info)
7741 {
7742 int rc;
7743 struct pqi_ctrl_info *ctrl_info;
7744 struct pqi_config_table_firmware_features *firmware_features;
7745 void __iomem *firmware_features_iomem_addr;
7746 unsigned int i;
7747 unsigned int num_features_supported;
7749 ctrl_info = section_info->ctrl_info;
7750 firmware_features = section_info->section;
7751 firmware_features_iomem_addr = section_info->section_iomem_addr;
7753 for (i = 0, num_features_supported = 0;
7754 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7755 if (pqi_is_firmware_feature_supported(firmware_features,
7756 pqi_firmware_features[i].feature_bit)) {
7757 pqi_firmware_features[i].supported = true;
7758 num_features_supported++;
7759 } else {
7760 pqi_firmware_feature_update(ctrl_info,
7761 &pqi_firmware_features[i]);
7762 }
7763 }
7765 if (num_features_supported == 0)
7766 return;
7768 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7769 if (!pqi_firmware_features[i].supported)
7770 continue;
7771 pqi_request_firmware_feature(firmware_features,
7772 pqi_firmware_features[i].feature_bit);
7773 }
7775 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7776 firmware_features_iomem_addr);
7777 if (rc) {
7778 dev_err(&ctrl_info->pci_dev->dev,
7779 "failed to enable firmware features in PQI configuration table\n");
7780 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7781 if (!pqi_firmware_features[i].supported)
7782 continue;
7783 pqi_firmware_feature_update(ctrl_info,
7784 &pqi_firmware_features[i]);
7785 }
7786 return;
7787 }
7789 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7790 if (!pqi_firmware_features[i].supported)
7791 continue;
7792 if (pqi_is_firmware_feature_enabled(firmware_features,
7793 firmware_features_iomem_addr,
7794 pqi_firmware_features[i].feature_bit)) {
7795 pqi_firmware_features[i].enabled = true;
7796 }
7797 pqi_firmware_feature_update(ctrl_info,
7798 &pqi_firmware_features[i]);
7799 }
7800 }
7802 static void pqi_init_firmware_features(void)
7803 {
7804 unsigned int i;
7806 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7807 pqi_firmware_features[i].supported = false;
7808 pqi_firmware_features[i].enabled = false;
7809 }
7810 }
7812 static void pqi_process_firmware_features_section(
7813 struct pqi_config_table_section_info *section_info)
7814 {
7815 mutex_lock(&pqi_firmware_features_mutex);
7816 pqi_init_firmware_features();
7817 pqi_process_firmware_features(section_info);
7818 mutex_unlock(&pqi_firmware_features_mutex);
7819 }
7821 /*
7822 * Reset all controller settings that can be initialized during the processing
7823 * of the PQI Configuration Table.
7824 */
7826 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7827 {
7828 ctrl_info->heartbeat_counter = NULL;
7829 ctrl_info->soft_reset_status = NULL;
7830 ctrl_info->soft_reset_handshake_supported = false;
7831 ctrl_info->enable_r1_writes = false;
7832 ctrl_info->enable_r5_writes = false;
7833 ctrl_info->enable_r6_writes = false;
7834 ctrl_info->raid_iu_timeout_supported = false;
7835 ctrl_info->tmf_iu_timeout_supported = false;
7836 ctrl_info->firmware_triage_supported = false;
7837 ctrl_info->rpl_extended_format_4_5_supported = false;
7838 }
7840 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7841 {
7842 u32 table_length;
7843 u32 section_offset;
7844 bool firmware_feature_section_present;
7845 void __iomem *table_iomem_addr;
7846 struct pqi_config_table *config_table;
7847 struct pqi_config_table_section_header *section;
7848 struct pqi_config_table_section_info section_info;
7849 struct pqi_config_table_section_info feature_section_info;
7851 table_length = ctrl_info->config_table_length;
7852 if (table_length == 0)
7853 return 0;
7855 config_table = kmalloc(table_length, GFP_KERNEL);
7856 if (!config_table) {
7857 dev_err(&ctrl_info->pci_dev->dev,
7858 "failed to allocate memory for PQI configuration table\n");
7859 return -ENOMEM;
7860 }
7862 /*
7863 * Copy the config table contents from I/O memory space into the
7864 * memory we allocated above.
7865 */
7866 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7867 memcpy_fromio(config_table, table_iomem_addr, table_length);
7869 firmware_feature_section_present = false;
7870 section_info.ctrl_info = ctrl_info;
7871 section_offset = get_unaligned_le32(&config_table->first_section_offset);
7873 while (section_offset) {
7874 section = (void *)config_table + section_offset;
7876 section_info.section = section;
7877 section_info.section_offset = section_offset;
7878 section_info.section_iomem_addr = table_iomem_addr + section_offset;
7880 switch (get_unaligned_le16(&section->section_id)) {
7881 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7882 firmware_feature_section_present = true;
7883 feature_section_info = section_info;
7884 break;
7885 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7886 if (pqi_disable_heartbeat)
7887 dev_warn(&ctrl_info->pci_dev->dev,
7888 "heartbeat disabled by module parameter\n");
7889 else
7890 ctrl_info->heartbeat_counter =
7891 table_iomem_addr +
7892 section_offset +
7893 offsetof(struct pqi_config_table_heartbeat,
7894 heartbeat_counter);
7895 break;
7896 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7897 ctrl_info->soft_reset_status =
7898 table_iomem_addr +
7899 section_offset +
7900 offsetof(struct pqi_config_table_soft_reset,
7901 soft_reset_status);
7902 break;
7903 }
7905 section_offset = get_unaligned_le16(&section->next_section_offset);
7906 }
7908 /*
7909 * We process the firmware feature section after all other sections
7910 * have been processed so that the feature bit callbacks can take
7911 * into account the settings configured by other sections.
7912 */
7913 if (firmware_feature_section_present)
7914 pqi_process_firmware_features_section(&feature_section_info);
7916 kfree(config_table);
7918 return 0;
7919 }
7921 /* Switches the controller from PQI mode back into SIS mode. */
7923 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7924 {
7925 int rc;
7927 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7928 rc = pqi_reset(ctrl_info);
7929 if (rc)
7930 return rc;
7931 rc = sis_reenable_sis_mode(ctrl_info);
7932 if (rc) {
7933 dev_err(&ctrl_info->pci_dev->dev,
7934 "re-enabling SIS mode failed with error %d\n", rc);
7935 return rc;
7936 }
7937 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7939 return 0;
7940 }
7942 /*
7943 * If the controller isn't already in SIS mode, this function forces it into
7944 * SIS mode.
7945 */
7947 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7948 {
7949 if (!sis_is_firmware_running(ctrl_info))
7950 return -ENXIO;
7952 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7953 return 0;
7955 if (sis_is_kernel_up(ctrl_info)) {
7956 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7957 return 0;
7958 }
7960 return pqi_revert_to_sis_mode(ctrl_info);
7961 }
7963 static void pqi_perform_lockup_action(void)
7964 {
7965 switch (pqi_lockup_action) {
7966 case PANIC:
7967 panic("FATAL: Smart Family Controller lockup detected");
7968 break;
7969 case REBOOT:
7970 emergency_restart();
7971 break;
7972 case NONE:
7973 default:
7974 break;
7975 }
7976 }
7978 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7979 {
7980 int rc;
7981 u32 product_id;
7983 if (reset_devices) {
7984 if (pqi_is_fw_triage_supported(ctrl_info)) {
7985 rc = sis_wait_for_fw_triage_completion(ctrl_info);
7986 if (rc)
7987 return rc;
7988 }
7989 sis_soft_reset(ctrl_info);
7990 ssleep(PQI_POST_RESET_DELAY_SECS);
7991 } else {
7992 rc = pqi_force_sis_mode(ctrl_info);
7993 if (rc)
7994 return rc;
7995 }
7997 /*
7998 * Wait until the controller is ready to start accepting SIS
7999 * commands.
8000 */
8001 rc = sis_wait_for_ctrl_ready(ctrl_info);
8002 if (rc) {
8003 if (reset_devices) {
8004 dev_err(&ctrl_info->pci_dev->dev,
8005 "kdump init failed with error %d\n", rc);
8006 pqi_lockup_action = REBOOT;
8007 pqi_perform_lockup_action();
8008 }
8009 return rc;
8010 }
8012 /*
8013 * Get the controller properties. This allows us to determine
8014 * whether or not it supports PQI mode.
8015 */
8016 rc = sis_get_ctrl_properties(ctrl_info);
8017 if (rc) {
8018 dev_err(&ctrl_info->pci_dev->dev,
8019 "error obtaining controller properties\n");
8020 return rc;
8021 }
8023 rc = sis_get_pqi_capabilities(ctrl_info);
8024 if (rc) {
8025 dev_err(&ctrl_info->pci_dev->dev,
8026 "error obtaining controller capabilities\n");
8027 return rc;
8028 }
8030 product_id = sis_get_product_id(ctrl_info);
8031 ctrl_info->product_id = (u8)product_id;
8032 ctrl_info->product_revision = (u8)(product_id >> 8);
8034 if (reset_devices) {
8035 if (ctrl_info->max_outstanding_requests >
8036 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8037 ctrl_info->max_outstanding_requests =
8038 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8039 } else {
8040 if (ctrl_info->max_outstanding_requests >
8041 PQI_MAX_OUTSTANDING_REQUESTS)
8042 ctrl_info->max_outstanding_requests =
8043 PQI_MAX_OUTSTANDING_REQUESTS;
8044 }
8046 pqi_calculate_io_resources(ctrl_info);
8048 rc = pqi_alloc_error_buffer(ctrl_info);
8049 if (rc) {
8050 dev_err(&ctrl_info->pci_dev->dev,
8051 "failed to allocate PQI error buffer\n");
8052 return rc;
8053 }
8055 /*
8056 * If the function we are about to call succeeds, the
8057 * controller will transition from legacy SIS mode
8058 * into PQI mode.
8059 */
8060 rc = sis_init_base_struct_addr(ctrl_info);
8061 if (rc) {
8062 dev_err(&ctrl_info->pci_dev->dev,
8063 "error initializing PQI mode\n");
8064 return rc;
8065 }
8067 /* Wait for the controller to complete the SIS -> PQI transition. */
8068 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8069 if (rc) {
8070 dev_err(&ctrl_info->pci_dev->dev,
8071 "transition to PQI mode failed\n");
8072 return rc;
8073 }
8075 /* From here on, we are running in PQI mode. */
8076 ctrl_info->pqi_mode_enabled = true;
8077 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8079 rc = pqi_alloc_admin_queues(ctrl_info);
8080 if (rc) {
8081 dev_err(&ctrl_info->pci_dev->dev,
8082 "failed to allocate admin queues\n");
8083 return rc;
8084 }
8086 rc = pqi_create_admin_queues(ctrl_info);
8087 if (rc) {
8088 dev_err(&ctrl_info->pci_dev->dev,
8089 "error creating admin queues\n");
8090 return rc;
8091 }
8093 rc = pqi_report_device_capability(ctrl_info);
8094 if (rc) {
8095 dev_err(&ctrl_info->pci_dev->dev,
8096 "obtaining device capability failed\n");
8097 return rc;
8098 }
8100 rc = pqi_validate_device_capability(ctrl_info);
8101 if (rc)
8102 return rc;
8104 pqi_calculate_queue_resources(ctrl_info);
8106 rc = pqi_enable_msix_interrupts(ctrl_info);
8107 if (rc)
8108 return rc;
8110 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8111 ctrl_info->max_msix_vectors =
8112 ctrl_info->num_msix_vectors_enabled;
8113 pqi_calculate_queue_resources(ctrl_info);
8114 }
8116 rc = pqi_alloc_io_resources(ctrl_info);
8117 if (rc)
8118 return rc;
8120 rc = pqi_alloc_operational_queues(ctrl_info);
8121 if (rc) {
8122 dev_err(&ctrl_info->pci_dev->dev,
8123 "failed to allocate operational queues\n");
8124 return rc;
8125 }
8127 pqi_init_operational_queues(ctrl_info);
8129 rc = pqi_create_queues(ctrl_info);
8130 if (rc)
8131 return rc;
8133 rc = pqi_request_irqs(ctrl_info);
8134 if (rc)
8135 return rc;
8137 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8139 ctrl_info->controller_online = true;
8141 rc = pqi_process_config_table(ctrl_info);
8142 if (rc)
8143 return rc;
8145 pqi_start_heartbeat_timer(ctrl_info);
8147 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8148 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8149 if (rc) { /* Supported features not returned correctly. */
8150 dev_err(&ctrl_info->pci_dev->dev,
8151 "error obtaining advanced RAID bypass configuration\n");
8152 return rc;
8153 }
8154 ctrl_info->ciss_report_log_flags |=
8155 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8156 }
8158 rc = pqi_enable_events(ctrl_info);
8159 if (rc) {
8160 dev_err(&ctrl_info->pci_dev->dev,
8161 "error enabling events\n");
8162 return rc;
8163 }
8165 /* Register with the SCSI subsystem. */
8166 rc = pqi_register_scsi(ctrl_info);
8167 if (rc)
8168 return rc;
8170 rc = pqi_get_ctrl_product_details(ctrl_info);
8171 if (rc) {
8172 dev_err(&ctrl_info->pci_dev->dev,
8173 "error obtaining product details\n");
8174 return rc;
8175 }
8177 rc = pqi_get_ctrl_serial_number(ctrl_info);
8178 if (rc) {
8179 dev_err(&ctrl_info->pci_dev->dev,
8180 "error obtaining ctrl serial number\n");
8181 return rc;
8182 }
8184 rc = pqi_set_diag_rescan(ctrl_info);
8185 if (rc) {
8186 dev_err(&ctrl_info->pci_dev->dev,
8187 "error enabling multi-lun rescan\n");
8188 return rc;
8189 }
8191 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8192 if (rc) {
8193 dev_err(&ctrl_info->pci_dev->dev,
8194 "error updating host wellness\n");
8195 return rc;
8196 }
8198 pqi_schedule_update_time_worker(ctrl_info);
8200 pqi_scan_scsi_devices(ctrl_info);
8202 return 0;
8203 }
8205 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8206 {
8207 unsigned int i;
8208 struct pqi_admin_queues *admin_queues;
8209 struct pqi_event_queue *event_queue;
8211 admin_queues = &ctrl_info->admin_queues;
8212 admin_queues->iq_pi_copy = 0;
8213 admin_queues->oq_ci_copy = 0;
8214 writel(0, admin_queues->oq_pi);
8216 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8217 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8218 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8219 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8221 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8222 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8223 writel(0, ctrl_info->queue_groups[i].oq_pi);
8224 }
8226 event_queue = &ctrl_info->event_queue;
8227 writel(0, event_queue->oq_pi);
8228 event_queue->oq_ci_copy = 0;
8229 }
8231 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8232 {
8233 int rc;
8235 rc = pqi_force_sis_mode(ctrl_info);
8236 if (rc)
8237 return rc;
8239 /*
8240 * Wait until the controller is ready to start accepting SIS
8241 * commands.
8242 */
8243 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8244 if (rc)
8245 return rc;
8247 /*
8248 * Get the controller properties. This allows us to determine
8249 * whether or not it supports PQI mode.
8250 */
8251 rc = sis_get_ctrl_properties(ctrl_info);
8252 if (rc) {
8253 dev_err(&ctrl_info->pci_dev->dev,
8254 "error obtaining controller properties\n");
8255 return rc;
8256 }
8258 rc = sis_get_pqi_capabilities(ctrl_info);
8259 if (rc) {
8260 dev_err(&ctrl_info->pci_dev->dev,
8261 "error obtaining controller capabilities\n");
8262 return rc;
8263 }
8265 /*
8266 * If the function we are about to call succeeds, the
8267 * controller will transition from legacy SIS mode
8268 * into PQI mode.
8269 */
8270 rc = sis_init_base_struct_addr(ctrl_info);
8271 if (rc) {
8272 dev_err(&ctrl_info->pci_dev->dev,
8273 "error initializing PQI mode\n");
8274 return rc;
8275 }
8277 /* Wait for the controller to complete the SIS -> PQI transition. */
8278 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8279 if (rc) {
8280 dev_err(&ctrl_info->pci_dev->dev,
8281 "transition to PQI mode failed\n");
8282 return rc;
8283 }
8285 /* From here on, we are running in PQI mode. */
8286 ctrl_info->pqi_mode_enabled = true;
8287 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8289 pqi_reinit_queues(ctrl_info);
8291 rc = pqi_create_admin_queues(ctrl_info);
8292 if (rc) {
8293 dev_err(&ctrl_info->pci_dev->dev,
8294 "error creating admin queues\n");
8295 return rc;
8296 }
8298 rc = pqi_create_queues(ctrl_info);
8299 if (rc)
8300 return rc;
8302 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8304 ctrl_info->controller_online = true;
8305 pqi_ctrl_unblock_requests(ctrl_info);
8307 pqi_ctrl_reset_config(ctrl_info);
8309 rc = pqi_process_config_table(ctrl_info);
8310 if (rc)
8311 return rc;
8313 pqi_start_heartbeat_timer(ctrl_info);
8315 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8316 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8317 if (rc) {
8318 dev_err(&ctrl_info->pci_dev->dev,
8319 "error obtaining advanced RAID bypass configuration\n");
8320 return rc;
8321 }
8322 ctrl_info->ciss_report_log_flags |=
8323 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8324 }
8326 rc = pqi_enable_events(ctrl_info);
8327 if (rc) {
8328 dev_err(&ctrl_info->pci_dev->dev,
8329 "error enabling events\n");
8330 return rc;
8331 }
8333 rc = pqi_get_ctrl_product_details(ctrl_info);
8334 if (rc) {
8335 dev_err(&ctrl_info->pci_dev->dev,
8336 "error obtaining product details\n");
8337 return rc;
8338 }
8340 rc = pqi_set_diag_rescan(ctrl_info);
8341 if (rc) {
8342 dev_err(&ctrl_info->pci_dev->dev,
8343 "error enabling multi-lun rescan\n");
8344 return rc;
8345 }
8347 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8348 if (rc) {
8349 dev_err(&ctrl_info->pci_dev->dev,
8350 "error updating host wellness\n");
8351 return rc;
8352 }
8354 if (pqi_ofa_in_progress(ctrl_info))
8355 pqi_ctrl_unblock_scan(ctrl_info);
8357 pqi_scan_scsi_devices(ctrl_info);
8359 return 0;
8360 }
8362 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8363 {
8364 int rc;
8366 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8367 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8369 return pcibios_err_to_errno(rc);
8370 }
8372 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8373 {
8374 int rc;
8375 u64 mask;
8377 rc = pci_enable_device(ctrl_info->pci_dev);
8378 if (rc) {
8379 dev_err(&ctrl_info->pci_dev->dev,
8380 "failed to enable PCI device\n");
8381 return rc;
8382 }
8384 if (sizeof(dma_addr_t) > 4)
8385 mask = DMA_BIT_MASK(64);
8386 else
8387 mask = DMA_BIT_MASK(32);
8389 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8390 if (rc) {
8391 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8392 goto disable_device;
8393 }
8395 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8396 if (rc) {
8397 dev_err(&ctrl_info->pci_dev->dev,
8398 "failed to obtain PCI resources\n");
8399 goto disable_device;
8400 }
8402 ctrl_info->iomem_base = ioremap(pci_resource_start(
8403 ctrl_info->pci_dev, 0),
8404 sizeof(struct pqi_ctrl_registers));
8405 if (!ctrl_info->iomem_base) {
8406 dev_err(&ctrl_info->pci_dev->dev,
8407 "failed to map memory for controller registers\n");
8408 rc = -ENOMEM;
8409 goto release_regions;
8410 }
8412 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8414 /* Increase the PCIe completion timeout. */
8415 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8416 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8417 if (rc) {
8418 dev_err(&ctrl_info->pci_dev->dev,
8419 "failed to set PCIe completion timeout\n");
8420 goto release_regions;
8421 }
8423 /* Enable bus mastering. */
8424 pci_set_master(ctrl_info->pci_dev);
8426 ctrl_info->registers = ctrl_info->iomem_base;
8427 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8429 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8431 return 0;
8433 release_regions:
8434 pci_release_regions(ctrl_info->pci_dev);
8435 disable_device:
8436 pci_disable_device(ctrl_info->pci_dev);
8438 return rc;
8439 }
8441 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8442 {
8443 iounmap(ctrl_info->iomem_base);
8444 pci_release_regions(ctrl_info->pci_dev);
8445 if (pci_is_enabled(ctrl_info->pci_dev))
8446 pci_disable_device(ctrl_info->pci_dev);
8447 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8448 }
8450 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8451 {
8452 struct pqi_ctrl_info *ctrl_info;
8454 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8455 GFP_KERNEL, numa_node);
8456 if (!ctrl_info)
8457 return NULL;
8459 mutex_init(&ctrl_info->scan_mutex);
8460 mutex_init(&ctrl_info->lun_reset_mutex);
8461 mutex_init(&ctrl_info->ofa_mutex);
8463 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8464 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8466 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8467 atomic_set(&ctrl_info->num_interrupts, 0);
8469 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8470 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8472 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8473 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8475 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8476 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8478 sema_init(&ctrl_info->sync_request_sem,
8479 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8480 init_waitqueue_head(&ctrl_info->block_requests_wait);
8482 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8483 ctrl_info->irq_mode = IRQ_MODE_NONE;
8484 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8486 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8487 ctrl_info->max_transfer_encrypted_sas_sata =
8488 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8489 ctrl_info->max_transfer_encrypted_nvme =
8490 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8491 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8492 ctrl_info->max_write_raid_1_10_2drive = ~0;
8493 ctrl_info->max_write_raid_1_10_3drive = ~0;
8495 return ctrl_info;
8496 }
8498 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8499 {
8500 kfree(ctrl_info);
8501 }
8503 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8504 {
8505 pqi_free_irqs(ctrl_info);
8506 pqi_disable_msix_interrupts(ctrl_info);
8507 }
8509 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8510 {
8511 pqi_stop_heartbeat_timer(ctrl_info);
8512 pqi_free_interrupts(ctrl_info);
8513 if (ctrl_info->queue_memory_base)
8514 dma_free_coherent(&ctrl_info->pci_dev->dev,
8515 ctrl_info->queue_memory_length,
8516 ctrl_info->queue_memory_base,
8517 ctrl_info->queue_memory_base_dma_handle);
8518 if (ctrl_info->admin_queue_memory_base)
8519 dma_free_coherent(&ctrl_info->pci_dev->dev,
8520 ctrl_info->admin_queue_memory_length,
8521 ctrl_info->admin_queue_memory_base,
8522 ctrl_info->admin_queue_memory_base_dma_handle);
8523 pqi_free_all_io_requests(ctrl_info);
8524 if (ctrl_info->error_buffer)
8525 dma_free_coherent(&ctrl_info->pci_dev->dev,
8526 ctrl_info->error_buffer_length,
8527 ctrl_info->error_buffer,
8528 ctrl_info->error_buffer_dma_handle);
8529 if (ctrl_info->iomem_base)
8530 pqi_cleanup_pci_init(ctrl_info);
8531 pqi_free_ctrl_info(ctrl_info);
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_remove_all_scsi_devices(ctrl_info);
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
}

static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);
}
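/*
 * The unquiesce path releases the controller in exactly the reverse
 * order of the quiesce path, so each stage is unblocked only after the
 * stages it depends on are already running again.
 */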
static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
{
	int i;
	u32 sg_count;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	dma_addr_t dma_handle;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	sg_count = DIV_ROUND_UP(total_size, chunk_size);
	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
		goto out;

	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
		goto out;

	dev = &ctrl_info->pci_dev->dev;

	for (i = 0; i < sg_count; i++) {
		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			goto out_free_chunks;
		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	return -ENOMEM;
}
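/*
 * The chunk list built above is handed to the firmware as a CISS
 * scatter-gather list: each descriptor holds a little-endian DMA
 * address/length pair, and CISS_SG_LAST flags the final descriptor.
 */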
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 chunk_size;
	u32 min_chunk_size;

	if (ctrl_info->ofa_bytes_requested == 0)
		return 0;

	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
	min_chunk_size = PAGE_ALIGN(min_chunk_size);

	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
			return 0;
		chunk_size /= 2;
		chunk_size = PAGE_ALIGN(chunk_size);
	}

	return -ENOMEM;
}
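/*
 * Worked example: for an 8 MB OFA request the first attempt is a single
 * 8 MB chunk; on failure the loop retries with two 4 MB chunks, then
 * four 2 MB chunks, and so on, giving up once chunk_size would fall
 * below min_chunk_size (the point at which the descriptor count would
 * exceed PQI_OFA_MAX_SG_DESCRIPTORS).
 */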
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	struct device *dev;
	struct pqi_ofa_memory *ofap;

	dev = &ctrl_info->pci_dev->dev;

	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
	if (!ofap)
		return;

	ctrl_info->pqi_ofa_mem_virt_addr = ofap;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev,
			"failed to allocate host buffer for Online Firmware Activation\n");
		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
		return;
	}

	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	unsigned int num_memory_descriptors;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
	if (!ofap)
		return;

	dev = &ctrl_info->pci_dev->dev;

	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
		goto out;

	mem_descriptor = ofap->sg_descriptor;
	num_memory_descriptors =
		get_unaligned_le16(&ofap->num_memory_descriptors);

	for (i = 0; i < num_memory_descriptors; i++) {
		dma_free_coherent(dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(dev, sizeof(*ofap), ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	u32 buffer_length;
	struct pqi_vendor_general_request request;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (ofap) {
		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(buffer_length,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
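/*
 * When no OFA buffer exists, the address and length fields stay zeroed
 * from the memset() above, which signals to the firmware that no host
 * memory has been made available.
 */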
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{
	ssleep(delay_secs + PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS);

	return pqi_ctrl_init_resume(ctrl_info);
}
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			sdev = scmd->device;
			if (!sdev || !scsi_device_online(sdev)) {
				pqi_free_io_request(io_request);
				continue;
			}
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}
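/*
 * Two completion paths are used above: requests owned by the SCSI
 * midlayer are failed with DID_NO_CONNECT, while driver-internal
 * requests get -ENXIO plus a canned RAID error so their waiters can
 * unwind cleanly.
 */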
static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}
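/*
 * Only cheap state changes happen in pqi_take_ctrl_offline(); the
 * teardown steps that must sleep (synchronous worker cancellation,
 * waiting for outstanding I/O to quiesce) are deferred to
 * ctrl_offline_work.
 */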
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microchip Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		node = cpu_to_node(0);
		if (node == NUMA_NO_NODE)
			node = 0;
		set_dev_node(&pci_dev->dev, node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}
static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
	}
}
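/*
 * Exactly one of the two WARN_ON() checks fires for every request
 * still pending here, so the resulting stack trace also records
 * whether the leaked request came from the SCSI midlayer or from the
 * driver itself.
 */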
static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (system_state == SYSTEM_RESTART)
		shutdown_event = RESTART;
	else
		shutdown_event = SHUTDOWN;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, shutdown_event);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}
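/*
 * Passing RESTART versus SHUTDOWN lets the firmware distinguish a
 * reboot from a power-off when it flushes its battery-backed write
 * cache; either way the flush must complete before the controller is
 * reset.
 */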
static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
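/*
 * The string parsed above comes from the driver's lockup_action module
 * parameter, e.g. (assuming the parameter keeps its usual name):
 *
 *	modprobe smartpqi lockup_action=panic
 *
 * Valid settings are none, reboot, and panic, matching the warning text.
 */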
static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}
#if defined(CONFIG_PM)

static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
{
	if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
		return RESTART;

	return SUSPEND;
}
static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (suspend) {
		enum bmic_flush_cache_shutdown_event shutdown_event;

		shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
		pqi_flush_cache(ctrl_info, shutdown_event);
	}

	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_crash_if_pending_command(ctrl_info);
	pqi_free_irqs(ctrl_info);

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}
static __maybe_unused int pqi_suspend(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, true);
}
static int pqi_resume_or_restore(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	ssleep(PQI_POST_RESET_DELAY_SECS);

	return pqi_ctrl_init_resume(ctrl_info);
}
static int pqi_freeze(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, false);
}
static int pqi_thaw(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	ctrl_info->controller_online = true;
	ctrl_info->pqi_mode_enabled = true;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	return 0;
}
static int pqi_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
	pqi_flush_cache(ctrl_info, shutdown_event);

	return 0;
}
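/*
 * In the dev_pm_ops below, suspend flushes the controller cache
 * because system power may be removed, while freeze (the first phase
 * of hibernation) skips the flush: memory is preserved and the
 * controller is brought back via thaw/restore.
 */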
static const struct dev_pm_ops pqi_pm_ops = {
	.suspend = pqi_suspend,
	.resume = pqi_resume_or_restore,
	.freeze = pqi_freeze,
	.thaw = pqi_thaw,
	.poweroff = pqi_poweroff,
	.restore = pqi_resume_or_restore,
};

#endif /* CONFIG_PM */
9090 /* Define the PCI IDs for the controllers that we support. */
9091 static const struct pci_device_id pqi_pci_id_table[] = {
9093 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9097 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9101 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9105 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9109 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9113 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9117 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9121 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9125 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9129 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9133 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9137 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9141 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9145 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9149 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9153 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9157 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9161 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9165 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9169 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9173 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9177 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9181 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9185 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9189 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9193 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9197 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9201 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9205 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9209 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9213 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9217 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9221 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9225 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9229 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9233 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9237 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9241 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9245 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9249 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9253 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9257 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9261 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9265 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0808) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0809) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x080a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1304) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1400) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1402) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1410) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1411) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1412) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1420) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1430) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1440) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1441) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1450) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1452) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1462) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1463) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1470) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1471) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1472) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1473) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1474) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1480) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1490) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1491) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a2) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a4) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a5) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a6) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14b0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14b1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c2) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14d0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14e0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14f0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1002) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) },
9633 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9637 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9641 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9645 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9649 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9653 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9657 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9661 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9665 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9669 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9673 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9677 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_GIGABYTE, 0x1000) },
9685 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9689 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9693 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9697 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9701 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9705 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9709 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9713 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9717 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9721 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9725 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9729 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9733 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9737 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.driver = {
		.pm = &pqi_pm_ops
	},
#endif
};
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_verify_structures();
	sis_verify_structures();

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}
static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
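/*
 * pqi_verify_structures() is pure compile-time checking: each
 * BUILD_BUG_ON() below pins a field of an on-the-wire or register
 * structure to the offset required by the controller interface, so any
 * layout regression breaks the build instead of corrupting I/O.
 */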
static void pqi_verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_shutdown_reason_code) != 0xcc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		driver_flags) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}