GNU Linux-libre 5.19-rc6-gnu
drivers/scsi/smartpqi/smartpqi_init.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    driver for Microchip PQI-based storage controllers
4  *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
5  *    Copyright (c) 2016-2018 Microsemi Corporation
6  *    Copyright (c) 2016 PMC-Sierra, Inc.
7  *
8  *    Questions/Comments/Bugfixes to storagedev@microchip.com
9  *
10  */
11
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/rtc.h>
19 #include <linux/bcd.h>
20 #include <linux/reboot.h>
21 #include <linux/cciss_ioctl.h>
22 #include <linux/blk-mq-pci.h>
23 #include <scsi/scsi_host.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_eh.h>
27 #include <scsi/scsi_transport_sas.h>
28 #include <asm/unaligned.h>
29 #include "smartpqi.h"
30 #include "smartpqi_sis.h"
31
32 #if !defined(BUILD_TIMESTAMP)
33 #define BUILD_TIMESTAMP
34 #endif
35
36 #define DRIVER_VERSION          "2.1.14-035"
37 #define DRIVER_MAJOR            2
38 #define DRIVER_MINOR            1
39 #define DRIVER_RELEASE          14
40 #define DRIVER_REVISION         35
41
42 #define DRIVER_NAME             "Microchip SmartPQI Driver (v" \
43                                 DRIVER_VERSION BUILD_TIMESTAMP ")"
44 #define DRIVER_NAME_SHORT       "smartpqi"
45
46 #define PQI_EXTRA_SGL_MEMORY    (12 * sizeof(struct pqi_sg_descriptor))
47
48 #define PQI_POST_RESET_DELAY_SECS                       5
49 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS      10
50
51 MODULE_AUTHOR("Microchip");
52 MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
53         DRIVER_VERSION);
54 MODULE_VERSION(DRIVER_VERSION);
55 MODULE_LICENSE("GPL");
56
57 struct pqi_cmd_priv {
58         int this_residual;
59 };
60
61 static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
62 {
63         return scsi_cmd_priv(cmd);
64 }
65
66 static void pqi_verify_structures(void);
67 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
68         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
69 static void pqi_ctrl_offline_worker(struct work_struct *work);
70 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
71 static void pqi_scan_start(struct Scsi_Host *shost);
72 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
73         struct pqi_queue_group *queue_group, enum pqi_io_path path,
74         struct pqi_io_request *io_request);
75 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
76         struct pqi_iu_header *request, unsigned int flags,
77         struct pqi_raid_error_info *error_info);
78 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
79         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
80         unsigned int cdb_length, struct pqi_queue_group *queue_group,
81         struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
 82 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
83         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
84         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
85         struct pqi_scsi_dev_raid_map_data *rmd);
86 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
87         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
88         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
89         struct pqi_scsi_dev_raid_map_data *rmd);
90 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
91 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
92 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
93 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
94 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
95 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
96 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
97         struct pqi_scsi_dev *device, unsigned long timeout_msecs);
98
99 /* for flags argument to pqi_submit_raid_request_synchronous() */
100 #define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1
101
102 static struct scsi_transport_template *pqi_sas_transport_template;
103
104 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
105
106 enum pqi_lockup_action {
107         NONE,
108         REBOOT,
109         PANIC
110 };
111
112 static enum pqi_lockup_action pqi_lockup_action = NONE;
113
114 static struct {
115         enum pqi_lockup_action  action;
116         char                    *name;
117 } pqi_lockup_actions[] = {
118         {
119                 .action = NONE,
120                 .name = "none",
121         },
122         {
123                 .action = REBOOT,
124                 .name = "reboot",
125         },
126         {
127                 .action = PANIC,
128                 .name = "panic",
129         },
130 };
131
132 static unsigned int pqi_supported_event_types[] = {
133         PQI_EVENT_TYPE_HOTPLUG,
134         PQI_EVENT_TYPE_HARDWARE,
135         PQI_EVENT_TYPE_PHYSICAL_DEVICE,
136         PQI_EVENT_TYPE_LOGICAL_DEVICE,
137         PQI_EVENT_TYPE_OFA,
138         PQI_EVENT_TYPE_AIO_STATE_CHANGE,
139         PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
140 };
141
142 static int pqi_disable_device_id_wildcards;
143 module_param_named(disable_device_id_wildcards,
144         pqi_disable_device_id_wildcards, int, 0644);
145 MODULE_PARM_DESC(disable_device_id_wildcards,
146         "Disable device ID wildcards.");
147
148 static int pqi_disable_heartbeat;
149 module_param_named(disable_heartbeat,
150         pqi_disable_heartbeat, int, 0644);
151 MODULE_PARM_DESC(disable_heartbeat,
152         "Disable heartbeat.");
153
154 static int pqi_disable_ctrl_shutdown;
155 module_param_named(disable_ctrl_shutdown,
156         pqi_disable_ctrl_shutdown, int, 0644);
157 MODULE_PARM_DESC(disable_ctrl_shutdown,
158         "Disable controller shutdown when controller locked up.");
159
160 static char *pqi_lockup_action_param;
161 module_param_named(lockup_action,
162         pqi_lockup_action_param, charp, 0644);
163 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
164         "\t\tSupported: none, reboot, panic\n"
165         "\t\tDefault: none");
166
167 static int pqi_expose_ld_first;
168 module_param_named(expose_ld_first,
169         pqi_expose_ld_first, int, 0644);
170 MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
171
172 static int pqi_hide_vsep;
173 module_param_named(hide_vsep,
174         pqi_hide_vsep, int, 0644);
175 MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
176
177 static char *raid_levels[] = {
178         "RAID-0",
179         "RAID-4",
180         "RAID-1(1+0)",
181         "RAID-5",
182         "RAID-5+1",
183         "RAID-6",
184         "RAID-1(Triple)",
185 };
186
187 static char *pqi_raid_level_to_string(u8 raid_level)
188 {
189         if (raid_level < ARRAY_SIZE(raid_levels))
190                 return raid_levels[raid_level];
191
192         return "RAID UNKNOWN";
193 }
194
195 #define SA_RAID_0               0
196 #define SA_RAID_4               1
197 #define SA_RAID_1               2       /* also used for RAID 10 */
198 #define SA_RAID_5               3       /* also used for RAID 50 */
199 #define SA_RAID_51              4
200 #define SA_RAID_6               5       /* also used for RAID 60 */
201 #define SA_RAID_TRIPLE          6       /* also used for RAID 1+0 Triple */
202 #define SA_RAID_MAX             SA_RAID_TRIPLE
203 #define SA_RAID_UNKNOWN         0xff
204
205 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
206 {
207         pqi_prep_for_scsi_done(scmd);
208         scsi_done(scmd);
209 }
210
211 static inline void pqi_disable_write_same(struct scsi_device *sdev)
212 {
213         sdev->no_write_same = 1;
214 }
215
216 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
217 {
218         return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
219 }
220
221 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
222 {
223         return !device->is_physical_device;
224 }
225
226 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
227 {
228         return scsi3addr[2] != 0;
229 }
230
231 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
232 {
233         return !ctrl_info->controller_online;
234 }
235
236 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
237 {
238         if (ctrl_info->controller_online)
239                 if (!sis_is_firmware_running(ctrl_info))
240                         pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
241 }
242
243 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
244 {
245         return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
246 }
247
248 #define PQI_DRIVER_SCRATCH_PQI_MODE                     0x1
249 #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED          0x2
250
251 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
252 {
253         return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
254 }
255
256 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
257         enum pqi_ctrl_mode mode)
258 {
259         u32 driver_scratch;
260
261         driver_scratch = sis_read_driver_scratch(ctrl_info);
262
263         if (mode == PQI_MODE)
264                 driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
265         else
266                 driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
267
268         sis_write_driver_scratch(ctrl_info, driver_scratch);
269 }
270
271 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
272 {
273         return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
274 }
275
276 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
277 {
278         u32 driver_scratch;
279
280         driver_scratch = sis_read_driver_scratch(ctrl_info);
281
282         if (is_supported)
283                 driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
284         else
285                 driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
286
287         sis_write_driver_scratch(ctrl_info, driver_scratch);
288 }
289
290 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
291 {
292         ctrl_info->scan_blocked = true;
293         mutex_lock(&ctrl_info->scan_mutex);
294 }
295
296 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
297 {
298         ctrl_info->scan_blocked = false;
299         mutex_unlock(&ctrl_info->scan_mutex);
300 }
301
302 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
303 {
304         return ctrl_info->scan_blocked;
305 }
306
307 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
308 {
309         mutex_lock(&ctrl_info->lun_reset_mutex);
310 }
311
312 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
313 {
314         mutex_unlock(&ctrl_info->lun_reset_mutex);
315 }
316
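/*
 * Block new requests from the SCSI midlayer, then poll scsi_host_busy()
 * until all outstanding commands have drained, backing off from 20 ms
 * to 500 ms sleeps after ten iterations.
 */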
317 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
318 {
319         struct Scsi_Host *shost;
320         unsigned int num_loops;
321         int msecs_sleep;
322
323         shost = ctrl_info->scsi_host;
324
325         scsi_block_requests(shost);
326
327         num_loops = 0;
328         msecs_sleep = 20;
329         while (scsi_host_busy(shost)) {
330                 num_loops++;
331                 if (num_loops == 10)
332                         msecs_sleep = 500;
333                 msleep(msecs_sleep);
334         }
335 }
336
337 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
338 {
339         scsi_unblock_requests(ctrl_info->scsi_host);
340 }
341
342 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
343 {
344         atomic_inc(&ctrl_info->num_busy_threads);
345 }
346
347 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
348 {
349         atomic_dec(&ctrl_info->num_busy_threads);
350 }
351
352 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
353 {
354         return ctrl_info->block_requests;
355 }
356
357 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
358 {
359         ctrl_info->block_requests = true;
360 }
361
362 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
363 {
364         ctrl_info->block_requests = false;
365         wake_up_all(&ctrl_info->block_requests_wait);
366 }
367
368 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
369 {
370         if (!pqi_ctrl_blocked(ctrl_info))
371                 return;
372
373         atomic_inc(&ctrl_info->num_blocked_threads);
374         wait_event(ctrl_info->block_requests_wait,
375                 !pqi_ctrl_blocked(ctrl_info));
376         atomic_dec(&ctrl_info->num_blocked_threads);
377 }
378
379 #define PQI_QUIESCE_WARNING_TIMEOUT_SECS                10
380
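/*
 * Sleep in short intervals until the number of threads still busy in the
 * driver drops to the number of threads blocked waiting on the controller,
 * warning every PQI_QUIESCE_WARNING_TIMEOUT_SECS while waiting.
 */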
381 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
382 {
383         unsigned long start_jiffies;
384         unsigned long warning_timeout;
385         bool displayed_warning;
386
387         displayed_warning = false;
388         start_jiffies = jiffies;
389         warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
390
391         while (atomic_read(&ctrl_info->num_busy_threads) >
392                 atomic_read(&ctrl_info->num_blocked_threads)) {
393                 if (time_after(jiffies, warning_timeout)) {
394                         dev_warn(&ctrl_info->pci_dev->dev,
395                                 "waiting %u seconds for driver activity to quiesce\n",
396                                 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
397                         displayed_warning = true;
398                         warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
399                 }
400                 usleep_range(1000, 2000);
401         }
402
403         if (displayed_warning)
404                 dev_warn(&ctrl_info->pci_dev->dev,
405                         "driver activity quiesced after waiting for %u seconds\n",
406                         jiffies_to_msecs(jiffies - start_jiffies) / 1000);
407 }
408
409 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
410 {
411         return device->device_offline;
412 }
413
414 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
415 {
416         mutex_lock(&ctrl_info->ofa_mutex);
417 }
418
419 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
420 {
421         mutex_unlock(&ctrl_info->ofa_mutex);
422 }
423
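/*
 * Taking and immediately releasing ofa_mutex simply waits for any Online
 * Firmware Activation in progress (which holds the mutex via
 * pqi_ctrl_ofa_start()) to complete.
 */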
424 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
425 {
426         mutex_lock(&ctrl_info->ofa_mutex);
427         mutex_unlock(&ctrl_info->ofa_mutex);
428 }
429
430 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
431 {
432         return mutex_is_locked(&ctrl_info->ofa_mutex);
433 }
434
435 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
436 {
437         device->in_remove = true;
438 }
439
440 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
441 {
442         return device->in_remove;
443 }
444
445 static inline int pqi_event_type_to_event_index(unsigned int event_type)
446 {
447         int index;
448
449         for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
450                 if (event_type == pqi_supported_event_types[index])
451                         return index;
452
453         return -1;
454 }
455
456 static inline bool pqi_is_supported_event(unsigned int event_type)
457 {
458         return pqi_event_type_to_event_index(event_type) != -1;
459 }
460
461 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
462         unsigned long delay)
463 {
464         if (pqi_ctrl_offline(ctrl_info))
465                 return;
466
467         schedule_delayed_work(&ctrl_info->rescan_work, delay);
468 }
469
470 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
471 {
472         pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
473 }
474
475 #define PQI_RESCAN_WORK_DELAY   (10 * HZ)
476
477 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
478 {
479         pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
480 }
481
482 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
483 {
484         cancel_delayed_work_sync(&ctrl_info->rescan_work);
485 }
486
487 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
488 {
489         if (!ctrl_info->heartbeat_counter)
490                 return 0;
491
492         return readl(ctrl_info->heartbeat_counter);
493 }
494
495 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
496 {
497         return readb(ctrl_info->soft_reset_status);
498 }
499
500 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
501 {
502         u8 status;
503
504         status = pqi_read_soft_reset_status(ctrl_info);
505         status &= ~PQI_SOFT_RESET_ABORT;
506         writeb(status, ctrl_info->soft_reset_status);
507 }
508
509 static int pqi_map_single(struct pci_dev *pci_dev,
510         struct pqi_sg_descriptor *sg_descriptor, void *buffer,
511         size_t buffer_length, enum dma_data_direction data_direction)
512 {
513         dma_addr_t bus_address;
514
515         if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
516                 return 0;
517
518         bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
519                 data_direction);
520         if (dma_mapping_error(&pci_dev->dev, bus_address))
521                 return -ENOMEM;
522
523         put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
524         put_unaligned_le32(buffer_length, &sg_descriptor->length);
525         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
526
527         return 0;
528 }
529
530 static void pqi_pci_unmap(struct pci_dev *pci_dev,
531         struct pqi_sg_descriptor *descriptors, int num_descriptors,
532         enum dma_data_direction data_direction)
533 {
534         int i;
535
536         if (data_direction == DMA_NONE)
537                 return;
538
539         for (i = 0; i < num_descriptors; i++)
540                 dma_unmap_single(&pci_dev->dev,
541                         (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
542                         get_unaligned_le32(&descriptors[i].length),
543                         data_direction);
544 }
545
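/*
 * Build a RAID path request IU for an internal CISS/BMIC command and
 * DMA-map the data buffer. On success the caller must undo the mapping
 * with pqi_pci_unmap() after the request completes.
 */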
546 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
547         struct pqi_raid_path_request *request, u8 cmd,
548         u8 *scsi3addr, void *buffer, size_t buffer_length,
549         u16 vpd_page, enum dma_data_direction *dir)
550 {
551         u8 *cdb;
552         size_t cdb_length = buffer_length;
553
554         memset(request, 0, sizeof(*request));
555
556         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
557         put_unaligned_le16(offsetof(struct pqi_raid_path_request,
558                 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
559                 &request->header.iu_length);
560         put_unaligned_le32(buffer_length, &request->buffer_length);
561         memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
562         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
563         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
564
565         cdb = request->cdb;
566
567         switch (cmd) {
568         case TEST_UNIT_READY:
569                 request->data_direction = SOP_READ_FLAG;
570                 cdb[0] = TEST_UNIT_READY;
571                 break;
572         case INQUIRY:
573                 request->data_direction = SOP_READ_FLAG;
574                 cdb[0] = INQUIRY;
575                 if (vpd_page & VPD_PAGE) {
576                         cdb[1] = 0x1;
577                         cdb[2] = (u8)vpd_page;
578                 }
579                 cdb[4] = (u8)cdb_length;
580                 break;
581         case CISS_REPORT_LOG:
582         case CISS_REPORT_PHYS:
583                 request->data_direction = SOP_READ_FLAG;
584                 cdb[0] = cmd;
585                 if (cmd == CISS_REPORT_PHYS) {
586                         if (ctrl_info->rpl_extended_format_4_5_supported)
587                                 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
588                         else
589                                 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
590                 } else {
591                         cdb[1] = ctrl_info->ciss_report_log_flags;
592                 }
593                 put_unaligned_be32(cdb_length, &cdb[6]);
594                 break;
595         case CISS_GET_RAID_MAP:
596                 request->data_direction = SOP_READ_FLAG;
597                 cdb[0] = CISS_READ;
598                 cdb[1] = CISS_GET_RAID_MAP;
599                 put_unaligned_be32(cdb_length, &cdb[6]);
600                 break;
601         case SA_FLUSH_CACHE:
602                 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
603                 request->data_direction = SOP_WRITE_FLAG;
604                 cdb[0] = BMIC_WRITE;
605                 cdb[6] = BMIC_FLUSH_CACHE;
606                 put_unaligned_be16(cdb_length, &cdb[7]);
607                 break;
608         case BMIC_SENSE_DIAG_OPTIONS:
609                 cdb_length = 0;
610                 fallthrough;
611         case BMIC_IDENTIFY_CONTROLLER:
612         case BMIC_IDENTIFY_PHYSICAL_DEVICE:
613         case BMIC_SENSE_SUBSYSTEM_INFORMATION:
614         case BMIC_SENSE_FEATURE:
615                 request->data_direction = SOP_READ_FLAG;
616                 cdb[0] = BMIC_READ;
617                 cdb[6] = cmd;
618                 put_unaligned_be16(cdb_length, &cdb[7]);
619                 break;
620         case BMIC_SET_DIAG_OPTIONS:
621                 cdb_length = 0;
622                 fallthrough;
623         case BMIC_WRITE_HOST_WELLNESS:
624                 request->data_direction = SOP_WRITE_FLAG;
625                 cdb[0] = BMIC_WRITE;
626                 cdb[6] = cmd;
627                 put_unaligned_be16(cdb_length, &cdb[7]);
628                 break;
629         case BMIC_CSMI_PASSTHRU:
630                 request->data_direction = SOP_BIDIRECTIONAL;
631                 cdb[0] = BMIC_WRITE;
632                 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
633                 cdb[6] = cmd;
634                 put_unaligned_be16(cdb_length, &cdb[7]);
635                 break;
636         default:
637                 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
638                 break;
639         }
640
641         switch (request->data_direction) {
642         case SOP_READ_FLAG:
643                 *dir = DMA_FROM_DEVICE;
644                 break;
645         case SOP_WRITE_FLAG:
646                 *dir = DMA_TO_DEVICE;
647                 break;
648         case SOP_NO_DIRECTION_FLAG:
649                 *dir = DMA_NONE;
650                 break;
651         default:
652                 *dir = DMA_BIDIRECTIONAL;
653                 break;
654         }
655
656         return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
657                 buffer, buffer_length, *dir);
658 }
659
660 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
661 {
662         io_request->scmd = NULL;
663         io_request->status = 0;
664         io_request->error_info = NULL;
665         io_request->raid_bypass = false;
666 }
667
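/*
 * Allocate an I/O request by scanning the pre-allocated pool for a free
 * slot. The next_io_request_slot hint is read and updated without locking
 * ("benignly racy"): a stale hint only costs extra loop iterations, and
 * actual ownership is decided by the atomic_inc_return() == 1 test.
 */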
668 static struct pqi_io_request *pqi_alloc_io_request(
669         struct pqi_ctrl_info *ctrl_info)
670 {
671         struct pqi_io_request *io_request;
672         u16 i = ctrl_info->next_io_request_slot;        /* benignly racy */
673
674         while (1) {
675                 io_request = &ctrl_info->io_request_pool[i];
676                 if (atomic_inc_return(&io_request->refcount) == 1)
677                         break;
678                 atomic_dec(&io_request->refcount);
679                 i = (i + 1) % ctrl_info->max_io_slots;
680         }
681
682         /* benignly racy */
683         ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
684
685         pqi_reinit_io_request(io_request);
686
687         return io_request;
688 }
689
690 static void pqi_free_io_request(struct pqi_io_request *io_request)
691 {
692         atomic_dec(&io_request->refcount);
693 }
694
695 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
696         u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
697         struct pqi_raid_error_info *error_info)
698 {
699         int rc;
700         struct pqi_raid_path_request request;
701         enum dma_data_direction dir;
702
703         rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
704                 buffer, buffer_length, vpd_page, &dir);
705         if (rc)
706                 return rc;
707
708         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
709
710         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
711
712         return rc;
713 }
714
715 /* helper functions for pqi_send_scsi_raid_request */
716
717 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
718         u8 cmd, void *buffer, size_t buffer_length)
719 {
720         return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
721                 buffer, buffer_length, 0, NULL);
722 }
723
724 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
725         u8 cmd, void *buffer, size_t buffer_length,
726         struct pqi_raid_error_info *error_info)
727 {
728         return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
729                 buffer, buffer_length, 0, error_info);
730 }
731
732 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
733         struct bmic_identify_controller *buffer)
734 {
735         return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
736                 buffer, sizeof(*buffer));
737 }
738
739 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
740         struct bmic_sense_subsystem_info *sense_info)
741 {
742         return pqi_send_ctrl_raid_request(ctrl_info,
743                 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
744                 sizeof(*sense_info));
745 }
746
747 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
748         u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
749 {
750         return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
751                 buffer, buffer_length, vpd_page, NULL);
752 }
753
754 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
755         struct pqi_scsi_dev *device,
756         struct bmic_identify_physical_device *buffer, size_t buffer_length)
757 {
758         int rc;
759         enum dma_data_direction dir;
760         u16 bmic_device_index;
761         struct pqi_raid_path_request request;
762
763         rc = pqi_build_raid_path_request(ctrl_info, &request,
764                 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
765                 buffer_length, 0, &dir);
766         if (rc)
767                 return rc;
768
769         bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
770         request.cdb[2] = (u8)bmic_device_index;
771         request.cdb[9] = (u8)(bmic_device_index >> 8);
772
773         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
774
775         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
776
777         return rc;
778 }
779
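/*
 * Convert a BMIC sense-feature transfer limit to bytes: the limit is
 * reported in 1 KiB units, with 0 meaning "no limit".
 */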
780 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
781 {
782         u32 bytes;
783
784         bytes = get_unaligned_le16(limit);
785         if (bytes == 0)
786                 bytes = ~0;
787         else
788                 bytes *= 1024;
789
790         return bytes;
791 }
792
793 #pragma pack(1)
794
795 struct bmic_sense_feature_buffer {
796         struct bmic_sense_feature_buffer_header header;
797         struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
798 };
799
800 #pragma pack()
801
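/*
 * Smallest sense-feature buffer/subpage lengths that still include the
 * max_write_raid_1_10_3drive field; used below to validate the response
 * returned by BMIC_SENSE_FEATURE.
 */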
802 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH       \
803         offsetofend(struct bmic_sense_feature_buffer, \
804                 aio_subpage.max_write_raid_1_10_3drive)
805
806 #define MINIMUM_AIO_SUBPAGE_LENGTH      \
807         (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
808                 max_write_raid_1_10_3drive) - \
809                 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
810
811 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
812 {
813         int rc;
814         enum dma_data_direction dir;
815         struct pqi_raid_path_request request;
816         struct bmic_sense_feature_buffer *buffer;
817
818         buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
819         if (!buffer)
820                 return -ENOMEM;
821
822         rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
823                 buffer, sizeof(*buffer), 0, &dir);
824         if (rc)
825                 goto error;
826
827         request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
828         request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
829
830         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
831
832         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
833
834         if (rc)
835                 goto error;
836
837         if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
838                 buffer->header.subpage_code !=
839                         BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
840                 get_unaligned_le16(&buffer->header.buffer_length) <
841                         MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
842                 buffer->aio_subpage.header.page_code !=
843                         BMIC_SENSE_FEATURE_IO_PAGE ||
844                 buffer->aio_subpage.header.subpage_code !=
845                         BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
846                 get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
847                         MINIMUM_AIO_SUBPAGE_LENGTH) {
848                 goto error;
849         }
850
851         ctrl_info->max_transfer_encrypted_sas_sata =
852                 pqi_aio_limit_to_bytes(
853                         &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
854
855         ctrl_info->max_transfer_encrypted_nvme =
856                 pqi_aio_limit_to_bytes(
857                         &buffer->aio_subpage.max_transfer_encrypted_nvme);
858
859         ctrl_info->max_write_raid_5_6 =
860                 pqi_aio_limit_to_bytes(
861                         &buffer->aio_subpage.max_write_raid_5_6);
862
863         ctrl_info->max_write_raid_1_10_2drive =
864                 pqi_aio_limit_to_bytes(
865                         &buffer->aio_subpage.max_write_raid_1_10_2drive);
866
867         ctrl_info->max_write_raid_1_10_3drive =
868                 pqi_aio_limit_to_bytes(
869                         &buffer->aio_subpage.max_write_raid_1_10_3drive);
870
871 error:
872         kfree(buffer);
873
874         return rc;
875 }
876
877 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
878         enum bmic_flush_cache_shutdown_event shutdown_event)
879 {
880         int rc;
881         struct bmic_flush_cache *flush_cache;
882
883         flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
884         if (!flush_cache)
885                 return -ENOMEM;
886
887         flush_cache->shutdown_event = shutdown_event;
888
889         rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
890                 sizeof(*flush_cache));
891
892         kfree(flush_cache);
893
894         return rc;
895 }
896
897 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
898         struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
899         struct pqi_raid_error_info *error_info)
900 {
901         return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
902                 buffer, buffer_length, error_info);
903 }
904
905 #define PQI_FETCH_PTRAID_DATA           (1 << 31)
906
907 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
908 {
909         int rc;
910         struct bmic_diag_options *diag;
911
912         diag = kzalloc(sizeof(*diag), GFP_KERNEL);
913         if (!diag)
914                 return -ENOMEM;
915
916         rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
917                 diag, sizeof(*diag));
918         if (rc)
919                 goto out;
920
921         diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
922
923         rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
924                 sizeof(*diag));
925
926 out:
927         kfree(diag);
928
929         return rc;
930 }
931
932 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
933         void *buffer, size_t buffer_length)
934 {
935         return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
936                 buffer, buffer_length);
937 }
938
939 #pragma pack(1)
940
941 struct bmic_host_wellness_driver_version {
942         u8      start_tag[4];
943         u8      driver_version_tag[2];
944         __le16  driver_version_length;
945         char    driver_version[32];
946         u8      dont_write_tag[2];
947         u8      end_tag[2];
948 };
949
950 #pragma pack()
951
952 static int pqi_write_driver_version_to_host_wellness(
953         struct pqi_ctrl_info *ctrl_info)
954 {
955         int rc;
956         struct bmic_host_wellness_driver_version *buffer;
957         size_t buffer_length;
958
959         buffer_length = sizeof(*buffer);
960
961         buffer = kmalloc(buffer_length, GFP_KERNEL);
962         if (!buffer)
963                 return -ENOMEM;
964
965         buffer->start_tag[0] = '<';
966         buffer->start_tag[1] = 'H';
967         buffer->start_tag[2] = 'W';
968         buffer->start_tag[3] = '>';
969         buffer->driver_version_tag[0] = 'D';
970         buffer->driver_version_tag[1] = 'V';
971         put_unaligned_le16(sizeof(buffer->driver_version),
972                 &buffer->driver_version_length);
973         strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
974                 sizeof(buffer->driver_version) - 1);
975         buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
976         buffer->dont_write_tag[0] = 'D';
977         buffer->dont_write_tag[1] = 'W';
978         buffer->end_tag[0] = 'Z';
979         buffer->end_tag[1] = 'Z';
980
981         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
982
983         kfree(buffer);
984
985         return rc;
986 }
987
988 #pragma pack(1)
989
990 struct bmic_host_wellness_time {
991         u8      start_tag[4];
992         u8      time_tag[2];
993         __le16  time_length;
994         u8      time[8];
995         u8      dont_write_tag[2];
996         u8      end_tag[2];
997 };
998
999 #pragma pack()
1000
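/*
 * Report the current host time to the controller as a BCD-encoded
 * timestamp: hour, minute, second, 0, month, day, century, year.
 */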
1001 static int pqi_write_current_time_to_host_wellness(
1002         struct pqi_ctrl_info *ctrl_info)
1003 {
1004         int rc;
1005         struct bmic_host_wellness_time *buffer;
1006         size_t buffer_length;
1007         time64_t local_time;
1008         unsigned int year;
1009         struct tm tm;
1010
1011         buffer_length = sizeof(*buffer);
1012
1013         buffer = kmalloc(buffer_length, GFP_KERNEL);
1014         if (!buffer)
1015                 return -ENOMEM;
1016
1017         buffer->start_tag[0] = '<';
1018         buffer->start_tag[1] = 'H';
1019         buffer->start_tag[2] = 'W';
1020         buffer->start_tag[3] = '>';
1021         buffer->time_tag[0] = 'T';
1022         buffer->time_tag[1] = 'D';
1023         put_unaligned_le16(sizeof(buffer->time),
1024                 &buffer->time_length);
1025
1026         local_time = ktime_get_real_seconds();
1027         time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
1028         year = tm.tm_year + 1900;
1029
1030         buffer->time[0] = bin2bcd(tm.tm_hour);
1031         buffer->time[1] = bin2bcd(tm.tm_min);
1032         buffer->time[2] = bin2bcd(tm.tm_sec);
1033         buffer->time[3] = 0;
1034         buffer->time[4] = bin2bcd(tm.tm_mon + 1);
1035         buffer->time[5] = bin2bcd(tm.tm_mday);
1036         buffer->time[6] = bin2bcd(year / 100);
1037         buffer->time[7] = bin2bcd(year % 100);
1038
1039         buffer->dont_write_tag[0] = 'D';
1040         buffer->dont_write_tag[1] = 'W';
1041         buffer->end_tag[0] = 'Z';
1042         buffer->end_tag[1] = 'Z';
1043
1044         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1045
1046         kfree(buffer);
1047
1048         return rc;
1049 }
1050
1051 #define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * HZ)
1052
1053 static void pqi_update_time_worker(struct work_struct *work)
1054 {
1055         int rc;
1056         struct pqi_ctrl_info *ctrl_info;
1057
1058         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1059                 update_time_work);
1060
1061         rc = pqi_write_current_time_to_host_wellness(ctrl_info);
1062         if (rc)
1063                 dev_warn(&ctrl_info->pci_dev->dev,
1064                         "error updating time on controller\n");
1065
1066         schedule_delayed_work(&ctrl_info->update_time_work,
1067                 PQI_UPDATE_TIME_WORK_INTERVAL);
1068 }
1069
1070 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1071 {
1072         schedule_delayed_work(&ctrl_info->update_time_work, 0);
1073 }
1074
1075 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1076 {
1077         cancel_delayed_work_sync(&ctrl_info->update_time_work);
1078 }
1079
1080 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1081         size_t buffer_length)
1082 {
1083         return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
1084 }
1085
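/*
 * Issue a CISS report LUNs command in two passes: first read just the
 * header to learn the list length, then allocate and read the full list,
 * retrying if the list grew between the two passes.
 */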
1086 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
1087 {
1088         int rc;
1089         size_t lun_list_length;
1090         size_t lun_data_length;
1091         size_t new_lun_list_length;
1092         void *lun_data = NULL;
1093         struct report_lun_header *report_lun_header;
1094
1095         report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1096         if (!report_lun_header) {
1097                 rc = -ENOMEM;
1098                 goto out;
1099         }
1100
1101         rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
1102         if (rc)
1103                 goto out;
1104
1105         lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1106
1107 again:
1108         lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1109
1110         lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1111         if (!lun_data) {
1112                 rc = -ENOMEM;
1113                 goto out;
1114         }
1115
1116         if (lun_list_length == 0) {
1117                 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1118                 goto out;
1119         }
1120
1121         rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1122         if (rc)
1123                 goto out;
1124
1125         new_lun_list_length =
1126                 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
1127
1128         if (new_lun_list_length > lun_list_length) {
1129                 lun_list_length = new_lun_list_length;
1130                 kfree(lun_data);
1131                 goto again;
1132         }
1133
1134 out:
1135         kfree(report_lun_header);
1136
1137         if (rc) {
1138                 kfree(lun_data);
1139                 lun_data = NULL;
1140         }
1141
1142         *buffer = lun_data;
1143
1144         return rc;
1145 }
1146
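/*
 * Report physical LUNs. If the controller did not return extended
 * format 4 (16-byte WWIDs), convert the 8-byte WWID (format 2) response
 * into the 16-byte layout so callers only have to handle one format.
 */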
1147 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1148 {
1149         int rc;
1150         unsigned int i;
1151         u8 rpl_response_format;
1152         u32 num_physicals;
1153         size_t rpl_16byte_wwid_list_length;
1154         void *rpl_list;
1155         struct report_lun_header *rpl_header;
1156         struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
1157         struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
1158
1159         rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
1160         if (rc)
1161                 return rc;
1162
1163         if (ctrl_info->rpl_extended_format_4_5_supported) {
1164                 rpl_header = rpl_list;
1165                 rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
1166                 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
1167                         *buffer = rpl_list;
1168                         return 0;
1169                 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
1170                         dev_err(&ctrl_info->pci_dev->dev,
1171                                 "RPL returned unsupported data format %u\n",
1172                                 rpl_response_format);
1173                         return -EINVAL;
1174                 } else {
1175                         dev_warn(&ctrl_info->pci_dev->dev,
1176                                 "RPL returned extended format 2 instead of 4\n");
1177                 }
1178         }
1179
1180         rpl_8byte_wwid_list = rpl_list;
1181         num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
1182         rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
1183
1184         rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
1185         if (!rpl_16byte_wwid_list)
1186                 return -ENOMEM;
1187
1188         put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
1189                 &rpl_16byte_wwid_list->header.list_length);
1190         rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
1191
1192         for (i = 0; i < num_physicals; i++) {
1193                 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
1194                 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
1195                 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
1196                 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
1197                 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
1198                 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
1199                 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
1200                 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
1201         }
1202
1203         kfree(rpl_8byte_wwid_list);
1204         *buffer = rpl_16byte_wwid_list;
1205
1206         return 0;
1207 }
1208
1209 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1210 {
1211         return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1212 }
1213
1214 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1215         struct report_phys_lun_16byte_wwid_list **physdev_list,
1216         struct report_log_lun_list **logdev_list)
1217 {
1218         int rc;
1219         size_t logdev_list_length;
1220         size_t logdev_data_length;
1221         struct report_log_lun_list *internal_logdev_list;
1222         struct report_log_lun_list *logdev_data;
1223         struct report_lun_header report_lun_header;
1224
1225         rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1226         if (rc)
1227                 dev_err(&ctrl_info->pci_dev->dev,
1228                         "report physical LUNs failed\n");
1229
1230         rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1231         if (rc)
1232                 dev_err(&ctrl_info->pci_dev->dev,
1233                         "report logical LUNs failed\n");
1234
1235         /*
1236          * Tack the controller itself onto the end of the logical device list.
1237          */
1238
1239         logdev_data = *logdev_list;
1240
1241         if (logdev_data) {
1242                 logdev_list_length =
1243                         get_unaligned_be32(&logdev_data->header.list_length);
1244         } else {
1245                 memset(&report_lun_header, 0, sizeof(report_lun_header));
1246                 logdev_data =
1247                         (struct report_log_lun_list *)&report_lun_header;
1248                 logdev_list_length = 0;
1249         }
1250
1251         logdev_data_length = sizeof(struct report_lun_header) +
1252                 logdev_list_length;
1253
1254         internal_logdev_list = kmalloc(logdev_data_length +
1255                 sizeof(struct report_log_lun), GFP_KERNEL);
1256         if (!internal_logdev_list) {
1257                 kfree(*logdev_list);
1258                 *logdev_list = NULL;
1259                 return -ENOMEM;
1260         }
1261
1262         memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1263         memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1264                 sizeof(struct report_log_lun));
1265         put_unaligned_be32(logdev_list_length +
1266                 sizeof(struct report_log_lun),
1267                 &internal_logdev_list->header.list_length);
1268
1269         kfree(*logdev_list);
1270         *logdev_list = internal_logdev_list;
1271
1272         return 0;
1273 }
1274
1275 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1276         int bus, int target, int lun)
1277 {
1278         device->bus = bus;
1279         device->target = target;
1280         device->lun = lun;
1281 }
1282
1283 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1284 {
1285         u8 *scsi3addr;
1286         u32 lunid;
1287         int bus;
1288         int target;
1289         int lun;
1290
1291         scsi3addr = device->scsi3addr;
1292         lunid = get_unaligned_le32(scsi3addr);
1293
1294         if (pqi_is_hba_lunid(scsi3addr)) {
1295                 /* The specified device is the controller. */
1296                 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1297                 device->target_lun_valid = true;
1298                 return;
1299         }
1300
1301         if (pqi_is_logical_device(device)) {
1302                 if (device->is_external_raid_device) {
1303                         bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1304                         target = (lunid >> 16) & 0x3fff;
1305                         lun = lunid & 0xff;
1306                 } else {
1307                         bus = PQI_RAID_VOLUME_BUS;
1308                         target = 0;
1309                         lun = lunid & 0x3fff;
1310                 }
1311                 pqi_set_bus_target_lun(device, bus, target, lun);
1312                 device->target_lun_valid = true;
1313                 return;
1314         }
1315
1316         /*
1317          * Defer target and LUN assignment for non-controller physical devices
1318          * because the SAS transport layer will make these assignments later.
1319          */
1320         pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1321 }
1322
1323 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1324         struct pqi_scsi_dev *device)
1325 {
1326         int rc;
1327         u8 raid_level;
1328         u8 *buffer;
1329
1330         raid_level = SA_RAID_UNKNOWN;
1331
1332         buffer = kmalloc(64, GFP_KERNEL);
1333         if (buffer) {
1334                 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1335                         VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1336                 if (rc == 0) {
1337                         raid_level = buffer[8];
1338                         if (raid_level > SA_RAID_MAX)
1339                                 raid_level = SA_RAID_UNKNOWN;
1340                 }
1341                 kfree(buffer);
1342         }
1343
1344         device->raid_level = raid_level;
1345 }
1346
1347 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1348         struct pqi_scsi_dev *device, struct raid_map *raid_map)
1349 {
1350         char *err_msg;
1351         u32 raid_map_size;
1352         u32 r5or6_blocks_per_row;
1353
1354         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1355
1356         if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1357                 err_msg = "RAID map too small";
1358                 goto bad_raid_map;
1359         }
1360
1361         if (device->raid_level == SA_RAID_1) {
1362                 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1363                         err_msg = "invalid RAID-1 map";
1364                         goto bad_raid_map;
1365                 }
1366         } else if (device->raid_level == SA_RAID_TRIPLE) {
1367                 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1368                         err_msg = "invalid RAID-1(Triple) map";
1369                         goto bad_raid_map;
1370                 }
1371         } else if ((device->raid_level == SA_RAID_5 ||
1372                 device->raid_level == SA_RAID_6) &&
1373                 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1374                 /* RAID 50/60 */
1375                 r5or6_blocks_per_row =
1376                         get_unaligned_le16(&raid_map->strip_size) *
1377                         get_unaligned_le16(&raid_map->data_disks_per_row);
1378                 if (r5or6_blocks_per_row == 0) {
1379                         err_msg = "invalid RAID-5 or RAID-6 map";
1380                         goto bad_raid_map;
1381                 }
1382         }
1383
1384         return 0;
1385
1386 bad_raid_map:
1387         dev_warn(&ctrl_info->pci_dev->dev,
1388                 "logical device %08x%08x %s\n",
1389                 *((u32 *)&device->scsi3addr),
1390                 *((u32 *)&device->scsi3addr[4]), err_msg);
1391
1392         return -EINVAL;
1393 }
1394
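/*
 * Fetch the RAID map for a logical volume. If the map is larger than the
 * default-sized buffer, reallocate using the structure_size reported in
 * the returned map and fetch it again.
 */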
1395 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1396         struct pqi_scsi_dev *device)
1397 {
1398         int rc;
1399         u32 raid_map_size;
1400         struct raid_map *raid_map;
1401
1402         raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1403         if (!raid_map)
1404                 return -ENOMEM;
1405
1406         rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1407                 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
1408         if (rc)
1409                 goto error;
1410
1411         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1412
1413         if (raid_map_size > sizeof(*raid_map)) {
1414
1415                 kfree(raid_map);
1416
1417                 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1418                 if (!raid_map)
1419                         return -ENOMEM;
1420
1421                 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1422                         device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1423                 if (rc)
1424                         goto error;
1425
1426                 if (get_unaligned_le32(&raid_map->structure_size)
1427                         != raid_map_size) {
1428                         dev_warn(&ctrl_info->pci_dev->dev,
1429                                 "requested %u bytes, received %u bytes\n",
1430                                 raid_map_size,
1431                                 get_unaligned_le32(&raid_map->structure_size));
1432                         rc = -EINVAL;
1433                         goto error;
1434                 }
1435         }
1436
1437         rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1438         if (rc)
1439                 goto error;
1440
1441         device->raid_map = raid_map;
1442
1443         return 0;
1444
1445 error:
1446         kfree(raid_map);
1447
1448         return rc;
1449 }
1450
1451 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1452         struct pqi_scsi_dev *device)
1453 {
1454         if (!ctrl_info->lv_drive_type_mix_valid) {
1455                 device->max_transfer_encrypted = ~0;
1456                 return;
1457         }
1458
1459         switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1460         case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1461         case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1462         case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1463         case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1464         case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1465         case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1466         case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1467                 device->max_transfer_encrypted =
1468                         ctrl_info->max_transfer_encrypted_sas_sata;
1469                 break;
1470         case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1471                 device->max_transfer_encrypted =
1472                         ctrl_info->max_transfer_encrypted_nvme;
1473                 break;
1474         case LV_DRIVE_TYPE_MIX_UNKNOWN:
1475         case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1476         default:
1477                 device->max_transfer_encrypted =
1478                         min(ctrl_info->max_transfer_encrypted_sas_sata,
1479                                 ctrl_info->max_transfer_encrypted_nvme);
1480                 break;
1481         }
1482 }
1483
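/*
 * Read the logical volume bypass status VPD page and enable RAID bypass
 * only if bypass is both configured and enabled and a valid RAID map can
 * be obtained.
 */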
1484 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1485         struct pqi_scsi_dev *device)
1486 {
1487         int rc;
1488         u8 *buffer;
1489         u8 bypass_status;
1490
1491         buffer = kmalloc(64, GFP_KERNEL);
1492         if (!buffer)
1493                 return;
1494
1495         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1496                 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1497         if (rc)
1498                 goto out;
1499
1500 #define RAID_BYPASS_STATUS              4
1501 #define RAID_BYPASS_CONFIGURED          0x1
1502 #define RAID_BYPASS_ENABLED             0x2
1503
1504         bypass_status = buffer[RAID_BYPASS_STATUS];
1505         device->raid_bypass_configured =
1506                 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1507         if (device->raid_bypass_configured &&
1508                 (bypass_status & RAID_BYPASS_ENABLED) &&
1509                 pqi_get_raid_map(ctrl_info, device) == 0) {
1510                 device->raid_bypass_enabled = true;
1511                 if (get_unaligned_le16(&device->raid_map->flags) &
1512                         RAID_MAP_ENCRYPTION_ENABLED)
1513                         pqi_set_max_transfer_encrypted(ctrl_info, device);
1514         }
1515
1516 out:
1517         kfree(buffer);
1518 }
1519
1520 /*
1521  * Use vendor-specific VPD to determine online/offline status of a volume.
1522  */
1523
1524 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1525         struct pqi_scsi_dev *device)
1526 {
1527         int rc;
1528         size_t page_length;
1529         u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1530         bool volume_offline = true;
1531         u32 volume_flags;
1532         struct ciss_vpd_logical_volume_status *vpd;
1533
1534         vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1535         if (!vpd)
1536                 goto no_buffer;
1537
1538         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1539                 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1540         if (rc)
1541                 goto out;
1542
1543         if (vpd->page_code != CISS_VPD_LV_STATUS)
1544                 goto out;
1545
1546         page_length = offsetof(struct ciss_vpd_logical_volume_status,
1547                 volume_status) + vpd->page_length;
1548         if (page_length < sizeof(*vpd))
1549                 goto out;
1550
1551         volume_status = vpd->volume_status;
1552         volume_flags = get_unaligned_be32(&vpd->flags);
1553         volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1554
1555 out:
1556         kfree(vpd);
1557 no_buffer:
1558         device->volume_status = volume_status;
1559         device->volume_offline = volume_offline;
1560 }
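
/*
 * A note on the length check above: as the structure is laid out,
 * vpd->page_length describes the bytes starting at volume_status, so the
 * header portion (offsetof(..., volume_status)) is added back before
 * comparing against sizeof(*vpd).  If the device returns a truncated
 * page, the defaults set at the top of the function
 * (CISS_LV_STATUS_UNAVAILABLE, volume_offline = true) are kept.
 */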
1561
1562 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED   0x01
1563 #define PQI_DEVICE_PHY_MAP_SUPPORTED    0x10
1564
1565 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1566         struct pqi_scsi_dev *device,
1567         struct bmic_identify_physical_device *id_phys)
1568 {
1569         int rc;
1570
1571         memset(id_phys, 0, sizeof(*id_phys));
1572
1573         rc = pqi_identify_physical_device(ctrl_info, device,
1574                 id_phys, sizeof(*id_phys));
1575         if (rc) {
1576                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1577                 return rc;
1578         }
1579
1580         scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1581         scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1582
1583         memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1584         memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1585
1586         device->box_index = id_phys->box_index;
1587         device->phys_box_on_bus = id_phys->phys_box_on_bus;
1588         device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1589         device->queue_depth =
1590                 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1591         device->active_path_index = id_phys->active_path_number;
1592         device->path_map = id_phys->redundant_path_present_map;
1593         memcpy(&device->box,
1594                 &id_phys->alternate_paths_phys_box_on_port,
1595                 sizeof(device->box));
1596         memcpy(&device->phys_connector,
1597                 &id_phys->alternate_paths_phys_connector,
1598                 sizeof(device->phys_connector));
1599         device->bay = id_phys->phys_bay_in_box;
1600
1601         if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1602                 id_phys->phy_count)
1603                 device->phy_id =
1604                         id_phys->phy_to_phy_map[device->active_path_index];
1605         else
1606                 device->phy_id = 0xFF;
1607
1608         device->ncq_prio_support =
1609                 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1610                 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1611
1612         return 0;
1613 }
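
/*
 * Illustration of the NCQ priority check above (the flag value is an
 * example, not taken from any particular drive): with
 * misc_drive_flags == 0x00010000, shifting right by 16 gives 0x0001, and
 * ANDing with PQI_DEVICE_NCQ_PRIO_SUPPORTED (0x01) leaves the bit set,
 * so device->ncq_prio_support is nonzero.
 */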
1614
1615 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1616         struct pqi_scsi_dev *device)
1617 {
1618         int rc;
1619         u8 *buffer;
1620
1621         buffer = kmalloc(64, GFP_KERNEL);
1622         if (!buffer)
1623                 return -ENOMEM;
1624
1625         /* Send an inquiry to the device to see what it is. */
1626         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1627         if (rc)
1628                 goto out;
1629
1630         scsi_sanitize_inquiry_string(&buffer[8], 8);
1631         scsi_sanitize_inquiry_string(&buffer[16], 16);
1632
1633         device->devtype = buffer[0] & 0x1f;
1634         memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1635         memcpy(device->model, &buffer[16], sizeof(device->model));
1636
1637         if (device->devtype == TYPE_DISK) {
1638                 if (device->is_external_raid_device) {
1639                         device->raid_level = SA_RAID_UNKNOWN;
1640                         device->volume_status = CISS_LV_OK;
1641                         device->volume_offline = false;
1642                 } else {
1643                         pqi_get_raid_level(ctrl_info, device);
1644                         pqi_get_raid_bypass_status(ctrl_info, device);
1645                         pqi_get_volume_status(ctrl_info, device);
1646                 }
1647         }
1648
1649 out:
1650         kfree(buffer);
1651
1652         return rc;
1653 }
1654
1655 /*
1656  * Prevent adding a drive to the OS for some corner cases, such as a
1657  * drive undergoing a sanitize operation. Some OSes will continue to
1658  * poll the drive until the sanitize completes, which can take hours,
1659  * resulting in long boot-up delays. Commands such as TUR and READ_CAP
1660  * are allowed, but READ/WRITE cause a check condition, so the OS
1661  * cannot check/read the partition table.
1662  * Note: devices that have completed sanitize must be re-enabled
1663  *       using the management utility.
1664  */
1665 static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
1666         struct pqi_scsi_dev *device)
1667 {
1668         u8 scsi_status;
1669         int rc;
1670         enum dma_data_direction dir;
1671         char *buffer;
1672         int buffer_length = 64;
1673         size_t sense_data_length;
1674         struct scsi_sense_hdr sshdr;
1675         struct pqi_raid_path_request request;
1676         struct pqi_raid_error_info error_info;
1677         bool offline = false; /* Assume keep online */
1678
1679         /* Do not check controllers. */
1680         if (pqi_is_hba_lunid(device->scsi3addr))
1681                 return false;
1682
1683         /* Do not check LVs. */
1684         if (pqi_is_logical_device(device))
1685                 return false;
1686
1687         buffer = kmalloc(buffer_length, GFP_KERNEL);
1688         if (!buffer)
1689                 return false; /* Assume not offline */
1690
1691         /* Check for SANITIZE in progress using TUR */
1692         rc = pqi_build_raid_path_request(ctrl_info, &request,
1693                 TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
1694                 buffer_length, 0, &dir);
1695         if (rc)
1696                 goto out; /* Assume not offline */
1697
1698         memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
1699
1700         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
1701
1702         if (rc)
1703                 goto out; /* Assume not offline */
1704
1705         scsi_status = error_info.status;
1706         sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
1707         if (sense_data_length == 0)
1708                 sense_data_length =
1709                         get_unaligned_le16(&error_info.response_data_length);
1710         if (sense_data_length) {
1711                 if (sense_data_length > sizeof(error_info.data))
1712                         sense_data_length = sizeof(error_info.data);
1713
1714                 /*
1715                  * Check for sanitize in progress: asc:0x04, ascq: 0x1b
1716                  */
1717                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
1718                         scsi_normalize_sense(error_info.data,
1719                                 sense_data_length, &sshdr) &&
1720                                 sshdr.sense_key == NOT_READY &&
1721                                 sshdr.asc == 0x04 &&
1722                                 sshdr.ascq == 0x1b) {
1723                         device->device_offline = true;
1724                         offline = true;
1725                         goto out; /* Keep device offline */
1726                 }
1727         }
1728
1729 out:
1730         kfree(buffer);
1731         return offline;
1732 }
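
/*
 * Example of the sense data matched above: a drive in the middle of a
 * sanitize operation fails the TEST UNIT READY with a CHECK CONDITION
 * whose sense key is NOT READY (0x2), ASC 0x04, ASCQ 0x1b ("logical
 * unit not ready, sanitize in progress").  Only that exact combination
 * keeps the device offline; any other failure falls through and the
 * device is treated as online.
 */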
1733
1734 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1735         struct pqi_scsi_dev *device,
1736         struct bmic_identify_physical_device *id_phys)
1737 {
1738         int rc;
1739
1740         if (device->is_expander_smp_device)
1741                 return 0;
1742
1743         if (pqi_is_logical_device(device))
1744                 rc = pqi_get_logical_device_info(ctrl_info, device);
1745         else
1746                 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1747
1748         return rc;
1749 }
1750
1751 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1752         struct pqi_scsi_dev *device)
1753 {
1754         char *status;
1755         static const char unknown_state_str[] =
1756                 "Volume is in an unknown state (%u)";
1757         char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1758
1759         switch (device->volume_status) {
1760         case CISS_LV_OK:
1761                 status = "Volume online";
1762                 break;
1763         case CISS_LV_FAILED:
1764                 status = "Volume failed";
1765                 break;
1766         case CISS_LV_NOT_CONFIGURED:
1767                 status = "Volume not configured";
1768                 break;
1769         case CISS_LV_DEGRADED:
1770                 status = "Volume degraded";
1771                 break;
1772         case CISS_LV_READY_FOR_RECOVERY:
1773                 status = "Volume ready for recovery operation";
1774                 break;
1775         case CISS_LV_UNDERGOING_RECOVERY:
1776                 status = "Volume undergoing recovery";
1777                 break;
1778         case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1779                 status = "Wrong physical drive was replaced";
1780                 break;
1781         case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1782                 status = "A physical drive not properly connected";
1783                 break;
1784         case CISS_LV_HARDWARE_OVERHEATING:
1785                 status = "Hardware is overheating";
1786                 break;
1787         case CISS_LV_HARDWARE_HAS_OVERHEATED:
1788                 status = "Hardware has overheated";
1789                 break;
1790         case CISS_LV_UNDERGOING_EXPANSION:
1791                 status = "Volume undergoing expansion";
1792                 break;
1793         case CISS_LV_NOT_AVAILABLE:
1794                 status = "Volume waiting for transforming volume";
1795                 break;
1796         case CISS_LV_QUEUED_FOR_EXPANSION:
1797                 status = "Volume queued for expansion";
1798                 break;
1799         case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1800                 status = "Volume disabled due to SCSI ID conflict";
1801                 break;
1802         case CISS_LV_EJECTED:
1803                 status = "Volume has been ejected";
1804                 break;
1805         case CISS_LV_UNDERGOING_ERASE:
1806                 status = "Volume undergoing background erase";
1807                 break;
1808         case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1809                 status = "Volume ready for predictive spare rebuild";
1810                 break;
1811         case CISS_LV_UNDERGOING_RPI:
1812                 status = "Volume undergoing rapid parity initialization";
1813                 break;
1814         case CISS_LV_PENDING_RPI:
1815                 status = "Volume queued for rapid parity initialization";
1816                 break;
1817         case CISS_LV_ENCRYPTED_NO_KEY:
1818                 status = "Encrypted volume inaccessible - key not present";
1819                 break;
1820         case CISS_LV_UNDERGOING_ENCRYPTION:
1821                 status = "Volume undergoing encryption process";
1822                 break;
1823         case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1824                 status = "Volume undergoing encryption re-keying process";
1825                 break;
1826         case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1827                 status = "Volume encrypted but encryption is disabled";
1828                 break;
1829         case CISS_LV_PENDING_ENCRYPTION:
1830                 status = "Volume pending migration to encrypted state";
1831                 break;
1832         case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1833                 status = "Volume pending encryption rekeying";
1834                 break;
1835         case CISS_LV_NOT_SUPPORTED:
1836                 status = "Volume not supported on this controller";
1837                 break;
1838         case CISS_LV_STATUS_UNAVAILABLE:
1839                 status = "Volume status not available";
1840                 break;
1841         default:
1842                 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1843                         unknown_state_str, device->volume_status);
1844                 status = unknown_state_buffer;
1845                 break;
1846         }
1847
1848         dev_info(&ctrl_info->pci_dev->dev,
1849                 "scsi %d:%d:%d:%d %s\n",
1850                 ctrl_info->scsi_host->host_no,
1851                 device->bus, device->target, device->lun, status);
1852 }
1853
1854 static void pqi_rescan_worker(struct work_struct *work)
1855 {
1856         struct pqi_ctrl_info *ctrl_info;
1857
1858         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1859                 rescan_work);
1860
1861         pqi_scan_scsi_devices(ctrl_info);
1862 }
1863
1864 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1865         struct pqi_scsi_dev *device)
1866 {
1867         int rc;
1868
1869         if (pqi_is_logical_device(device))
1870                 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1871                         device->target, device->lun);
1872         else
1873                 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1874
1875         return rc;
1876 }
1877
1878 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS      (20 * 1000)
1879
1880 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1881 {
1882         int rc;
1883
1884         rc = pqi_device_wait_for_pending_io(ctrl_info, device,
1885                 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1886         if (rc)
1887                 dev_err(&ctrl_info->pci_dev->dev,
1888                         "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1889                         ctrl_info->scsi_host->host_no, device->bus,
1890                         device->target, device->lun,
1891                         atomic_read(&device->scsi_cmds_outstanding));
1892
1893         if (pqi_is_logical_device(device))
1894                 scsi_remove_device(device->sdev);
1895         else
1896                 pqi_remove_sas_device(device);
1897
1898         pqi_device_remove_start(device);
1899 }
1900
1901 /* Assumes the SCSI device list lock is held. */
1902
1903 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1904         int bus, int target, int lun)
1905 {
1906         struct pqi_scsi_dev *device;
1907
1908         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1909                 if (device->bus == bus && device->target == target && device->lun == lun)
1910                         return device;
1911
1912         return NULL;
1913 }
1914
1915 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1916 {
1917         if (dev1->is_physical_device != dev2->is_physical_device)
1918                 return false;
1919
1920         if (dev1->is_physical_device)
1921                 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1922
1923         return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1924 }
1925
1926 enum pqi_find_result {
1927         DEVICE_NOT_FOUND,
1928         DEVICE_CHANGED,
1929         DEVICE_SAME,
1930 };
1931
1932 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1933         struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1934 {
1935         struct pqi_scsi_dev *device;
1936
1937         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1938                 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1939                         *matching_device = device;
1940                         if (pqi_device_equal(device_to_find, device)) {
1941                                 if (device_to_find->volume_offline)
1942                                         return DEVICE_CHANGED;
1943                                 return DEVICE_SAME;
1944                         }
1945                         return DEVICE_CHANGED;
1946                 }
1947         }
1948
1949         return DEVICE_NOT_FOUND;
1950 }
1951
1952 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1953 {
1954         if (device->is_expander_smp_device)
1955                 return "Enclosure SMP    ";
1956
1957         return scsi_device_type(device->devtype);
1958 }
1959
1960 #define PQI_DEV_INFO_BUFFER_LENGTH      128
1961
1962 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1963         char *action, struct pqi_scsi_dev *device)
1964 {
1965         ssize_t count;
1966         char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1967
1968         count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1969                 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1970
1971         if (device->target_lun_valid)
1972                 count += scnprintf(buffer + count,
1973                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1974                         "%d:%d",
1975                         device->target,
1976                         device->lun);
1977         else
1978                 count += scnprintf(buffer + count,
1979                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1980                         "-:-");
1981
1982         if (pqi_is_logical_device(device))
1983                 count += scnprintf(buffer + count,
1984                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1985                         " %08x%08x",
1986                         *((u32 *)&device->scsi3addr),
1987                         *((u32 *)&device->scsi3addr[4]));
1988         else
1989                 count += scnprintf(buffer + count,
1990                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1991                         " %016llx%016llx",
1992                         get_unaligned_be64(&device->wwid[0]),
1993                         get_unaligned_be64(&device->wwid[8]));
1994
1995         count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1996                 " %s %.8s %.16s ",
1997                 pqi_device_type(device),
1998                 device->vendor,
1999                 device->model);
2000
2001         if (pqi_is_logical_device(device)) {
2002                 if (device->devtype == TYPE_DISK)
2003                         count += scnprintf(buffer + count,
2004                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2005                                 "SSDSmartPathCap%c En%c %-12s",
2006                                 device->raid_bypass_configured ? '+' : '-',
2007                                 device->raid_bypass_enabled ? '+' : '-',
2008                                 pqi_raid_level_to_string(device->raid_level));
2009         } else {
2010                 count += scnprintf(buffer + count,
2011                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2012                         "AIO%c", device->aio_enabled ? '+' : '-');
2013                 if (device->devtype == TYPE_DISK ||
2014                         device->devtype == TYPE_ZBC)
2015                         count += scnprintf(buffer + count,
2016                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2017                                 " qd=%-6d", device->queue_depth);
2018         }
2019
2020         dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2021 }
2022
2023 /* Assumes the SCSI device list lock is held. */
2024
2025 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2026         struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2027 {
2028         existing_device->device_type = new_device->device_type;
2029         existing_device->bus = new_device->bus;
2030         if (new_device->target_lun_valid) {
2031                 existing_device->target = new_device->target;
2032                 existing_device->lun = new_device->lun;
2033                 existing_device->target_lun_valid = true;
2034         }
2035
2036         if (pqi_is_logical_device(existing_device) &&
2037                 ctrl_info->logical_volume_rescan_needed)
2038                 existing_device->rescan = true;
2039
2040         /* By definition, the scsi3addr and wwid fields are already the same. */
2041
2042         existing_device->is_physical_device = new_device->is_physical_device;
2043         existing_device->is_external_raid_device =
2044                 new_device->is_external_raid_device;
2045         existing_device->is_expander_smp_device =
2046                 new_device->is_expander_smp_device;
2047         existing_device->aio_enabled = new_device->aio_enabled;
2048         memcpy(existing_device->vendor, new_device->vendor,
2049                 sizeof(existing_device->vendor));
2050         memcpy(existing_device->model, new_device->model,
2051                 sizeof(existing_device->model));
2052         existing_device->sas_address = new_device->sas_address;
2053         existing_device->raid_level = new_device->raid_level;
2054         existing_device->queue_depth = new_device->queue_depth;
2055         existing_device->aio_handle = new_device->aio_handle;
2056         existing_device->volume_status = new_device->volume_status;
2057         existing_device->active_path_index = new_device->active_path_index;
2058         existing_device->phy_id = new_device->phy_id;
2059         existing_device->path_map = new_device->path_map;
2060         existing_device->bay = new_device->bay;
2061         existing_device->box_index = new_device->box_index;
2062         existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2063         existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2064         memcpy(existing_device->box, new_device->box,
2065                 sizeof(existing_device->box));
2066         memcpy(existing_device->phys_connector, new_device->phys_connector,
2067                 sizeof(existing_device->phys_connector));
2068         memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2069         kfree(existing_device->raid_map);
2070         existing_device->raid_map = new_device->raid_map;
2071         existing_device->raid_bypass_configured =
2072                 new_device->raid_bypass_configured;
2073         existing_device->raid_bypass_enabled =
2074                 new_device->raid_bypass_enabled;
2075         existing_device->device_offline = false;
2076
2077         /* To prevent this from being freed later. */
2078         new_device->raid_map = NULL;
2079 }
2080
2081 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2082 {
2083         if (device) {
2084                 kfree(device->raid_map);
2085                 kfree(device);
2086         }
2087 }
2088
2089 /*
2090  * Called when exposing a new device to the OS fails, in order to re-adjust
2091  * our internal SCSI device list to match the SCSI ML's view.
2092  */
2093
2094 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2095         struct pqi_scsi_dev *device)
2096 {
2097         unsigned long flags;
2098
2099         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2100         list_del(&device->scsi_device_list_entry);
2101         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2102
2103         /* Allow the device structure to be freed later. */
2104         device->keep_device = false;
2105 }
2106
2107 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2108 {
2109         if (device->is_expander_smp_device)
2110                 return device->sas_port != NULL;
2111
2112         return device->sdev != NULL;
2113 }
2114
2115 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2116         struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2117 {
2118         int rc;
2119         unsigned int i;
2120         unsigned long flags;
2121         enum pqi_find_result find_result;
2122         struct pqi_scsi_dev *device;
2123         struct pqi_scsi_dev *next;
2124         struct pqi_scsi_dev *matching_device;
2125         LIST_HEAD(add_list);
2126         LIST_HEAD(delete_list);
2127
2128         /*
2129          * The idea here is to do as little work as possible while holding the
2130          * spinlock.  That's why we go to great pains to defer anything other
2131          * than updating the internal device list until after we release the
2132          * spinlock.
2133          */
2134
2135         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2136
2137         /* Assume that all devices in the existing list have gone away. */
2138         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2139                 device->device_gone = true;
2140
2141         for (i = 0; i < num_new_devices; i++) {
2142                 device = new_device_list[i];
2143
2144                 find_result = pqi_scsi_find_entry(ctrl_info, device,
2145                         &matching_device);
2146
2147                 switch (find_result) {
2148                 case DEVICE_SAME:
2149                         /*
2150                          * The newly found device is already in the existing
2151                          * device list.
2152                          */
2153                         device->new_device = false;
2154                         matching_device->device_gone = false;
2155                         pqi_scsi_update_device(ctrl_info, matching_device, device);
2156                         break;
2157                 case DEVICE_NOT_FOUND:
2158                         /*
2159                          * The newly found device is NOT in the existing device
2160                          * list.
2161                          */
2162                         device->new_device = true;
2163                         break;
2164                 case DEVICE_CHANGED:
2165                         /*
2166                          * The original device has gone away and we need to add
2167                          * the new device.
2168                          */
2169                         device->new_device = true;
2170                         break;
2171                 }
2172         }
2173
2174         /* Process all devices that have gone away. */
2175         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2176                 scsi_device_list_entry) {
2177                 if (device->device_gone) {
2178                         list_del(&device->scsi_device_list_entry);
2179                         list_add_tail(&device->delete_list_entry, &delete_list);
2180                 }
2181         }
2182
2183         /* Process all new devices. */
2184         for (i = 0; i < num_new_devices; i++) {
2185                 device = new_device_list[i];
2186                 if (!device->new_device)
2187                         continue;
2188                 if (device->volume_offline)
2189                         continue;
2190                 list_add_tail(&device->scsi_device_list_entry,
2191                         &ctrl_info->scsi_device_list);
2192                 list_add_tail(&device->add_list_entry, &add_list);
2193                 /* To prevent this device structure from being freed later. */
2194                 device->keep_device = true;
2195         }
2196
2197         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2198
2199         /*
2200          * If OFA is in progress and there are devices that need to be deleted,
2201          * allow any pending reset operations to continue and unblock any SCSI
2202          * requests before removal.
2203          */
2204         if (pqi_ofa_in_progress(ctrl_info)) {
2205                 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2206                         if (pqi_is_device_added(device))
2207                                 pqi_device_remove_start(device);
2208                 pqi_ctrl_unblock_device_reset(ctrl_info);
2209                 pqi_scsi_unblock_requests(ctrl_info);
2210         }
2211
2212         /* Remove all devices that have gone away. */
2213         list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2214                 if (device->volume_offline) {
2215                         pqi_dev_info(ctrl_info, "offline", device);
2216                         pqi_show_volume_status(ctrl_info, device);
2217                 } else {
2218                         pqi_dev_info(ctrl_info, "removed", device);
2219                 }
2220                 if (pqi_is_device_added(device))
2221                         pqi_remove_device(ctrl_info, device);
2222                 list_del(&device->delete_list_entry);
2223                 pqi_free_device(device);
2224         }
2225
2226         /*
2227          * Notify the SML of any existing device changes, such as
2228          * queue depth and device size.
2229          */
2230         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2231                 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2232                         device->advertised_queue_depth = device->queue_depth;
2233                         scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2234                 }
2235                 if (device->sdev && device->rescan) {
2236                         scsi_rescan_device(&device->sdev->sdev_gendev);
2237                         device->rescan = false;
2238                 }
2239         }
2240
2241         /* Expose any new devices. */
2242         list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2243                 if (!pqi_is_device_added(device)) {
2244                         rc = pqi_add_device(ctrl_info, device);
2245                         if (rc == 0) {
2246                                 pqi_dev_info(ctrl_info, "added", device);
2247                         } else {
2248                                 dev_warn(&ctrl_info->pci_dev->dev,
2249                                         "scsi %d:%d:%d:%d addition failed, device not added\n",
2250                                         ctrl_info->scsi_host->host_no,
2251                                         device->bus, device->target,
2252                                         device->lun);
2253                                 pqi_fixup_botched_add(ctrl_info, device);
2254                         }
2255                 }
2256         }
2257
2258         ctrl_info->logical_volume_rescan_needed = false;
2259
2260 }
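
/*
 * Summary of the reconciliation above: under the device-list spinlock,
 * every existing device is first marked device_gone, each newly reported
 * device is classified via pqi_scsi_find_entry() (DEVICE_SAME entries are
 * updated in place, DEVICE_NOT_FOUND/DEVICE_CHANGED entries are queued
 * for addition), and devices still marked gone are moved to delete_list.
 * The SML removals, rescans, and additions then happen after the lock is
 * dropped, keeping the critical section short.
 */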
2261
2262 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2263 {
2264         /*
2265          * Only support the HBA controller itself as a RAID
2266          * controller.  If it's a RAID controller other than
2267          * the HBA itself (an external RAID controller, for
2268          * example), we don't support it.
2269          */
2270         if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2271                 !pqi_is_hba_lunid(device->scsi3addr))
2272                         return false;
2273
2274         return true;
2275 }
2276
2277 static inline bool pqi_skip_device(u8 *scsi3addr)
2278 {
2279         /* Ignore all masked devices. */
2280         if (MASKED_DEVICE(scsi3addr))
2281                 return true;
2282
2283         return false;
2284 }
2285
2286 static inline void pqi_mask_device(u8 *scsi3addr)
2287 {
2288         scsi3addr[3] |= 0xc0;
2289 }
2290
2291 static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2292 {
2293         if (pqi_is_logical_device(device))
2294                 return false;
2295
2296         return (device->path_map & (device->path_map - 1)) != 0;
2297 }
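
/*
 * The path_map test above is the usual "more than one bit set" trick:
 * x & (x - 1) clears the lowest set bit.  For example, path_map 0x02
 * (one active port) gives 0x02 & 0x01 == 0, i.e. not multipath, while
 * path_map 0x03 (two ports) gives 0x03 & 0x02 == 0x02, so the device is
 * reported as multipath.
 */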
2298
2299 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2300 {
2301         return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2302 }
2303
2304 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2305 {
2306         int i;
2307         int rc;
2308         LIST_HEAD(new_device_list_head);
2309         struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2310         struct report_log_lun_list *logdev_list = NULL;
2311         struct report_phys_lun_16byte_wwid *phys_lun;
2312         struct report_log_lun *log_lun;
2313         struct bmic_identify_physical_device *id_phys = NULL;
2314         u32 num_physicals;
2315         u32 num_logicals;
2316         struct pqi_scsi_dev **new_device_list = NULL;
2317         struct pqi_scsi_dev *device;
2318         struct pqi_scsi_dev *next;
2319         unsigned int num_new_devices;
2320         unsigned int num_valid_devices;
2321         bool is_physical_device;
2322         u8 *scsi3addr;
2323         unsigned int physical_index;
2324         unsigned int logical_index;
2325         static char *out_of_memory_msg =
2326                 "failed to allocate memory, device discovery stopped";
2327
2328         rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2329         if (rc)
2330                 goto out;
2331
2332         if (physdev_list)
2333                 num_physicals =
2334                         get_unaligned_be32(&physdev_list->header.list_length)
2335                                 / sizeof(physdev_list->lun_entries[0]);
2336         else
2337                 num_physicals = 0;
2338
2339         if (logdev_list)
2340                 num_logicals =
2341                         get_unaligned_be32(&logdev_list->header.list_length)
2342                                 / sizeof(logdev_list->lun_entries[0]);
2343         else
2344                 num_logicals = 0;
2345
2346         if (num_physicals) {
2347                 /*
2348                  * We need this buffer for calls to pqi_get_physical_device_info()
2349                  * below.  We allocate it here instead of inside
2350                  * pqi_get_physical_device_info() because it's a fairly large
2351                  * buffer.
2352                  */
2353                 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2354                 if (!id_phys) {
2355                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2356                                 out_of_memory_msg);
2357                         rc = -ENOMEM;
2358                         goto out;
2359                 }
2360
2361                 if (pqi_hide_vsep) {
2362                         for (i = num_physicals - 1; i >= 0; i--) {
2363                                 phys_lun = &physdev_list->lun_entries[i];
2364                                 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2365                                         pqi_mask_device(phys_lun->lunid);
2366                                         break;
2367                                 }
2368                         }
2369                 }
2370         }
2371
2372         if (num_logicals &&
2373                 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2374                 ctrl_info->lv_drive_type_mix_valid = true;
2375
2376         num_new_devices = num_physicals + num_logicals;
2377
2378         new_device_list = kmalloc_array(num_new_devices,
2379                                         sizeof(*new_device_list),
2380                                         GFP_KERNEL);
2381         if (!new_device_list) {
2382                 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2383                 rc = -ENOMEM;
2384                 goto out;
2385         }
2386
2387         for (i = 0; i < num_new_devices; i++) {
2388                 device = kzalloc(sizeof(*device), GFP_KERNEL);
2389                 if (!device) {
2390                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2391                                 out_of_memory_msg);
2392                         rc = -ENOMEM;
2393                         goto out;
2394                 }
2395                 list_add_tail(&device->new_device_list_entry,
2396                         &new_device_list_head);
2397         }
2398
2399         device = NULL;
2400         num_valid_devices = 0;
2401         physical_index = 0;
2402         logical_index = 0;
2403
2404         for (i = 0; i < num_new_devices; i++) {
2405
2406                 if ((!pqi_expose_ld_first && i < num_physicals) ||
2407                         (pqi_expose_ld_first && i >= num_logicals)) {
2408                         is_physical_device = true;
2409                         phys_lun = &physdev_list->lun_entries[physical_index++];
2410                         log_lun = NULL;
2411                         scsi3addr = phys_lun->lunid;
2412                 } else {
2413                         is_physical_device = false;
2414                         phys_lun = NULL;
2415                         log_lun = &logdev_list->lun_entries[logical_index++];
2416                         scsi3addr = log_lun->lunid;
2417                 }
2418
2419                 if (is_physical_device && pqi_skip_device(scsi3addr))
2420                         continue;
2421
2422                 if (device)
2423                         device = list_next_entry(device, new_device_list_entry);
2424                 else
2425                         device = list_first_entry(&new_device_list_head,
2426                                 struct pqi_scsi_dev, new_device_list_entry);
2427
2428                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2429                 device->is_physical_device = is_physical_device;
2430                 if (is_physical_device) {
2431                         device->device_type = phys_lun->device_type;
2432                         if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2433                                 device->is_expander_smp_device = true;
2434                 } else {
2435                         device->is_external_raid_device =
2436                                 pqi_is_external_raid_addr(scsi3addr);
2437                 }
2438
2439                 if (!pqi_is_supported_device(device))
2440                         continue;
2441
2442                 /* Do not present disks that the OS cannot fully probe */
2443                 if (pqi_keep_device_offline(ctrl_info, device))
2444                         continue;
2445
2446                 /* Gather information about the device. */
2447                 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2448                 if (rc == -ENOMEM) {
2449                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2450                                 out_of_memory_msg);
2451                         goto out;
2452                 }
2453                 if (rc) {
2454                         if (device->is_physical_device)
2455                                 dev_warn(&ctrl_info->pci_dev->dev,
2456                                         "obtaining device info failed, skipping physical device %016llx%016llx\n",
2457                                         get_unaligned_be64(&phys_lun->wwid[0]),
2458                                         get_unaligned_be64(&phys_lun->wwid[8]));
2459                         else
2460                                 dev_warn(&ctrl_info->pci_dev->dev,
2461                                         "obtaining device info failed, skipping logical device %08x%08x\n",
2462                                         *((u32 *)&device->scsi3addr),
2463                                         *((u32 *)&device->scsi3addr[4]));
2464                         rc = 0;
2465                         continue;
2466                 }
2467
2468                 pqi_assign_bus_target_lun(device);
2469
2470                 if (device->is_physical_device) {
2471                         memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2472                         if ((phys_lun->device_flags &
2473                                 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2474                                 phys_lun->aio_handle) {
2475                                         device->aio_enabled = true;
2476                                         device->aio_handle =
2477                                                 phys_lun->aio_handle;
2478                         }
2479                 } else {
2480                         memcpy(device->volume_id, log_lun->volume_id,
2481                                 sizeof(device->volume_id));
2482                 }
2483
2484                 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2485
2486                 new_device_list[num_valid_devices++] = device;
2487         }
2488
2489         pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2490
2491 out:
2492         list_for_each_entry_safe(device, next, &new_device_list_head,
2493                 new_device_list_entry) {
2494                 if (device->keep_device)
2495                         continue;
2496                 list_del(&device->new_device_list_entry);
2497                 pqi_free_device(device);
2498         }
2499
2500         kfree(new_device_list);
2501         kfree(physdev_list);
2502         kfree(logdev_list);
2503         kfree(id_phys);
2504
2505         return rc;
2506 }
2507
2508 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2509 {
2510         unsigned long flags;
2511         struct pqi_scsi_dev *device;
2512         struct pqi_scsi_dev *next;
2513
2514         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2515                 scsi_device_list_entry) {
2516                 if (pqi_is_device_added(device))
2517                         pqi_remove_device(ctrl_info, device);
2518                 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2519                 list_del(&device->scsi_device_list_entry);
2520                 pqi_free_device(device);
2521                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2522         }
2523 }
2524
2525 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2526 {
2527         int rc;
2528         int mutex_acquired;
2529
2530         if (pqi_ctrl_offline(ctrl_info))
2531                 return -ENXIO;
2532
2533         mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2534
2535         if (!mutex_acquired) {
2536                 if (pqi_ctrl_scan_blocked(ctrl_info))
2537                         return -EBUSY;
2538                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2539                 return -EINPROGRESS;
2540         }
2541
2542         rc = pqi_update_scsi_devices(ctrl_info);
2543         if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2544                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2545
2546         mutex_unlock(&ctrl_info->scan_mutex);
2547
2548         return rc;
2549 }
2550
2551 static void pqi_scan_start(struct Scsi_Host *shost)
2552 {
2553         struct pqi_ctrl_info *ctrl_info;
2554
2555         ctrl_info = shost_to_hba(shost);
2556
2557         pqi_scan_scsi_devices(ctrl_info);
2558 }
2559
2560 /* Returns TRUE if scan is finished. */
2561
2562 static int pqi_scan_finished(struct Scsi_Host *shost,
2563         unsigned long elapsed_time)
2564 {
2565         struct pqi_ctrl_info *ctrl_info;
2566
2567         ctrl_info = shost_priv(shost);
2568
2569         return !mutex_is_locked(&ctrl_info->scan_mutex);
2570 }
2571
2572 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2573         struct raid_map *raid_map, u64 first_block)
2574 {
2575         u32 volume_blk_size;
2576
2577         /*
2578          * Set the encryption tweak values based on logical block address.
2579          * If the block size is 512, the tweak value is equal to the LBA.
2580          * For other block sizes, tweak value is (LBA * block size) / 512.
2581          */
2582         volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2583         if (volume_blk_size != 512)
2584                 first_block = (first_block * volume_blk_size) / 512;
2585
2586         encryption_info->data_encryption_key_index =
2587                 get_unaligned_le16(&raid_map->data_encryption_key_index);
2588         encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2589         encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2590 }
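
/*
 * Worked example of the tweak scaling above (illustrative numbers): for
 * a volume with a 4096-byte block size, first_block is scaled by
 * 4096 / 512 = 8, so LBA 100 yields a tweak of 800; encrypt_tweak_lower
 * then holds 800 and encrypt_tweak_upper holds 0.  For a 512-byte block
 * size the LBA is used unchanged.
 */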
2591
2592 /*
2593  * Attempt to perform RAID bypass mapping for a logical volume I/O.
2594  */
2595
2596 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2597         struct pqi_scsi_dev_raid_map_data *rmd)
2598 {
2599         bool is_supported = true;
2600
2601         switch (rmd->raid_level) {
2602         case SA_RAID_0:
2603                 break;
2604         case SA_RAID_1:
2605                 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2606                         rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2607                         is_supported = false;
2608                 break;
2609         case SA_RAID_TRIPLE:
2610                 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2611                         rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2612                         is_supported = false;
2613                 break;
2614         case SA_RAID_5:
2615                 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2616                         rmd->data_length > ctrl_info->max_write_raid_5_6))
2617                         is_supported = false;
2618                 break;
2619         case SA_RAID_6:
2620                 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2621                         rmd->data_length > ctrl_info->max_write_raid_5_6))
2622                         is_supported = false;
2623                 break;
2624         default:
2625                 is_supported = false;
2626                 break;
2627         }
2628
2629         return is_supported;
2630 }
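
/*
 * In short: reads are bypass-eligible on RAID 0/1/TRIPLE/5/6, while
 * writes are eligible only when the matching enable_rN_writes knob is set
 * and the transfer fits under the controller's advertised limit
 * (max_write_raid_1_10_2drive, max_write_raid_1_10_3drive, or
 * max_write_raid_5_6).  Ineligible requests fall back to the normal RAID
 * path.
 */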
2631
2632 #define PQI_RAID_BYPASS_INELIGIBLE      1
2633
2634 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2635         struct pqi_scsi_dev_raid_map_data *rmd)
2636 {
2637         /* Check for valid opcode, get LBA and block count. */
2638         switch (scmd->cmnd[0]) {
2639         case WRITE_6:
2640                 rmd->is_write = true;
2641                 fallthrough;
2642         case READ_6:
2643                 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2644                         (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2645                 rmd->block_cnt = (u32)scmd->cmnd[4];
2646                 if (rmd->block_cnt == 0)
2647                         rmd->block_cnt = 256;
2648                 break;
2649         case WRITE_10:
2650                 rmd->is_write = true;
2651                 fallthrough;
2652         case READ_10:
2653                 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2654                 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2655                 break;
2656         case WRITE_12:
2657                 rmd->is_write = true;
2658                 fallthrough;
2659         case READ_12:
2660                 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2661                 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2662                 break;
2663         case WRITE_16:
2664                 rmd->is_write = true;
2665                 fallthrough;
2666         case READ_16:
2667                 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2668                 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2669                 break;
2670         default:
2671                 /* Process via normal I/O path. */
2672                 return PQI_RAID_BYPASS_INELIGIBLE;
2673         }
2674
2675         put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2676
2677         return 0;
2678 }
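
/*
 * Example of the 6-byte CDB decode above: for cmnd[] = { READ_6, 0x01,
 * 0x02, 0x03, 0x00, 0x00 }, first_block becomes
 * (0x01 << 16) | (0x02 << 8) | 0x03 = 0x010203, and block_cnt, being 0,
 * is treated as 256 blocks per the READ(6) definition.  The 10/12/16-byte
 * variants carry full-width big-endian LBA and length fields, so they are
 * read directly with the get_unaligned_be*() helpers.
 */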
2679
2680 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2681         struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2682 {
2683 #if BITS_PER_LONG == 32
2684         u64 tmpdiv;
2685 #endif
2686
2687         rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2688
2689         /* Check for invalid block or wraparound. */
2690         if (rmd->last_block >=
2691                 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2692                 rmd->last_block < rmd->first_block)
2693                 return PQI_RAID_BYPASS_INELIGIBLE;
2694
2695         rmd->data_disks_per_row =
2696                 get_unaligned_le16(&raid_map->data_disks_per_row);
2697         rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2698         rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2699
2700         /* Calculate stripe information for the request. */
2701         rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2702         if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2703                 return PQI_RAID_BYPASS_INELIGIBLE;
2704 #if BITS_PER_LONG == 32
2705         tmpdiv = rmd->first_block;
2706         do_div(tmpdiv, rmd->blocks_per_row);
2707         rmd->first_row = tmpdiv;
2708         tmpdiv = rmd->last_block;
2709         do_div(tmpdiv, rmd->blocks_per_row);
2710         rmd->last_row = tmpdiv;
2711         rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2712         rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2713         tmpdiv = rmd->first_row_offset;
2714         do_div(tmpdiv, rmd->strip_size);
2715         rmd->first_column = tmpdiv;
2716         tmpdiv = rmd->last_row_offset;
2717         do_div(tmpdiv, rmd->strip_size);
2718         rmd->last_column = tmpdiv;
2719 #else
2720         rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2721         rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2722         rmd->first_row_offset = (u32)(rmd->first_block -
2723                 (rmd->first_row * rmd->blocks_per_row));
2724         rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2725                 rmd->blocks_per_row));
2726         rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2727         rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2728 #endif
2729
2730         /* If this isn't a single row/column, then give the request to the controller. */
2731         if (rmd->first_row != rmd->last_row ||
2732                 rmd->first_column != rmd->last_column)
2733                 return PQI_RAID_BYPASS_INELIGIBLE;
2734
2735         /* Proceeding with driver mapping. */
2736         rmd->total_disks_per_row = rmd->data_disks_per_row +
2737                 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2738         rmd->map_row = ((u32)(rmd->first_row >>
2739                 raid_map->parity_rotation_shift)) %
2740                 get_unaligned_le16(&raid_map->row_cnt);
2741         rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2742                 rmd->first_column;
2743
2744         return 0;
2745 }
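
/*
 * Worked example of the stripe math above (illustrative geometry): with
 * strip_size = 128 blocks and data_disks_per_row = 3, blocks_per_row is
 * 384.  A request for blocks 1000..1007 maps to first_row = last_row =
 * 1000 / 384 = 2, row offsets 232..239, and first_column = last_column =
 * 232 / 128 = 1, so the request stays within a single row and column and
 * remains bypass-eligible; map_index is then
 * map_row * total_disks_per_row + first_column.
 */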
2746
2747 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2748         struct raid_map *raid_map)
2749 {
2750 #if BITS_PER_LONG == 32
2751         u64 tmpdiv;
2752 #endif
2753
2754         if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2755                 return PQI_RAID_BYPASS_INELIGIBLE;
2756
2757         /* RAID 50/60 */
2758         /* Verify first and last block are in same RAID group. */
2759         rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2760 #if BITS_PER_LONG == 32
2761         tmpdiv = rmd->first_block;
2762         rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2763         tmpdiv = rmd->first_group;
2764         do_div(tmpdiv, rmd->blocks_per_row);
2765         rmd->first_group = tmpdiv;
2766         tmpdiv = rmd->last_block;
2767         rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2768         tmpdiv = rmd->last_group;
2769         do_div(tmpdiv, rmd->blocks_per_row);
2770         rmd->last_group = tmpdiv;
2771 #else
2772         rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2773         rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2774 #endif
2775         if (rmd->first_group != rmd->last_group)
2776                 return PQI_RAID_BYPASS_INELIGIBLE;
2777
2778         /* Verify request is in a single row of RAID 5/6. */
2779 #if BITS_PER_LONG == 32
2780         tmpdiv = rmd->first_block;
2781         do_div(tmpdiv, rmd->stripesize);
2782         rmd->first_row = tmpdiv;
2783         rmd->r5or6_first_row = tmpdiv;
2784         tmpdiv = rmd->last_block;
2785         do_div(tmpdiv, rmd->stripesize);
2786         rmd->r5or6_last_row = tmpdiv;
2787 #else
2788         rmd->first_row = rmd->r5or6_first_row =
2789                 rmd->first_block / rmd->stripesize;
2790         rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2791 #endif
2792         if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2793                 return PQI_RAID_BYPASS_INELIGIBLE;
2794
2795         /* Verify request is in a single column. */
2796 #if BITS_PER_LONG == 32
2797         tmpdiv = rmd->first_block;
2798         rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2799         tmpdiv = rmd->first_row_offset;
2800         rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2801         rmd->r5or6_first_row_offset = rmd->first_row_offset;
2802         tmpdiv = rmd->last_block;
2803         rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2804         tmpdiv = rmd->r5or6_last_row_offset;
2805         rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2806         tmpdiv = rmd->r5or6_first_row_offset;
2807         do_div(tmpdiv, rmd->strip_size);
2808         rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2809         tmpdiv = rmd->r5or6_last_row_offset;
2810         do_div(tmpdiv, rmd->strip_size);
2811         rmd->r5or6_last_column = tmpdiv;
2812 #else
2813         rmd->first_row_offset = rmd->r5or6_first_row_offset =
2814                 (u32)((rmd->first_block % rmd->stripesize) %
2815                 rmd->blocks_per_row);
2816
2817         rmd->r5or6_last_row_offset =
2818                 (u32)((rmd->last_block % rmd->stripesize) %
2819                 rmd->blocks_per_row);
2820
2821         rmd->first_column =
2822                 rmd->r5or6_first_row_offset / rmd->strip_size;
2823         rmd->r5or6_first_column = rmd->first_column;
2824         rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2825 #endif
2826         if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2827                 return PQI_RAID_BYPASS_INELIGIBLE;
2828
2829         /* Request is eligible. */
2830         rmd->map_row =
2831                 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2832                 get_unaligned_le16(&raid_map->row_cnt);
2833
2834         rmd->map_index = (rmd->first_group *
2835                 (get_unaligned_le16(&raid_map->row_cnt) *
2836                 rmd->total_disks_per_row)) +
2837                 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2838
2839         if (rmd->is_write) {
2840                 u32 index;
2841
2842                 /*
2843                  * p_parity_it_nexus and q_parity_it_nexus refer to the
2844                  * parity entries inside the device's raid_map.
2845                  *
2846                  * A device's RAID map is bounded by the number of RAID disks
2847                  * squared.
2848                  * The device's RAID map size is checked during device
2849                  * initialization.
2850                  */
2851                 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2852                 index *= rmd->total_disks_per_row;
2853                 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2854
2855                 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2856                 if (rmd->raid_level == SA_RAID_6) {
2857                         rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2858                         rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2859                 }
2860 #if BITS_PER_LONG == 32
2861                 tmpdiv = rmd->first_block;
2862                 do_div(tmpdiv, rmd->blocks_per_row);
2863                 rmd->row = tmpdiv;
2864 #else
2865                 rmd->row = rmd->first_block / rmd->blocks_per_row;
2866 #endif
2867         }
2868
2869         return 0;
2870 }
2871
2872 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2873 {
2874         /* Build the new CDB for the physical disk I/O. */
2875         if (rmd->disk_block > 0xffffffff) {
2876                 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2877                 rmd->cdb[1] = 0;
2878                 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2879                 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2880                 rmd->cdb[14] = 0;
2881                 rmd->cdb[15] = 0;
2882                 rmd->cdb_length = 16;
2883         } else {
2884                 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2885                 rmd->cdb[1] = 0;
2886                 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2887                 rmd->cdb[6] = 0;
2888                 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2889                 rmd->cdb[9] = 0;
2890                 rmd->cdb_length = 10;
2891         }
2892 }
2893
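     /*
      * For a RAID 1/Triple write, collect the AIO IT nexus (aio_handle) of the
      * corresponding data drive in each mirror group so that the bypassed write
      * can be issued to every copy of the data.
      */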
2894 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2895         struct pqi_scsi_dev_raid_map_data *rmd)
2896 {
2897         u32 index;
2898         u32 group;
2899
2900         group = rmd->map_index / rmd->data_disks_per_row;
2901
2902         index = rmd->map_index - (group * rmd->data_disks_per_row);
2903         rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2904         index += rmd->data_disks_per_row;
2905         rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2906         if (rmd->layout_map_count > 2) {
2907                 index += rmd->data_disks_per_row;
2908                 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2909         }
2910
2911         rmd->num_it_nexus_entries = rmd->layout_map_count;
2912 }
2913
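     /*
      * Attempt to issue a logical-volume read/write directly to the underlying
      * physical drive(s) via the AIO path, bypassing the controller's RAID
      * engine.  Returns PQI_RAID_BYPASS_INELIGIBLE whenever the request cannot
      * be mapped (unsupported RAID level, zero-length transfer, request spans
      * multiple rows/columns, transfer too large, etc.), in which case the
      * caller issues the request on the normal RAID path instead.
      */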
2914 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2915         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2916         struct pqi_queue_group *queue_group)
2917 {
2918         int rc;
2919         struct raid_map *raid_map;
2920         u32 group;
2921         u32 next_bypass_group;
2922         struct pqi_encryption_info *encryption_info_ptr;
2923         struct pqi_encryption_info encryption_info;
2924         struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2925
2926         rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2927         if (rc)
2928                 return PQI_RAID_BYPASS_INELIGIBLE;
2929
2930         rmd.raid_level = device->raid_level;
2931
2932         if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2933                 return PQI_RAID_BYPASS_INELIGIBLE;
2934
2935         if (unlikely(rmd.block_cnt == 0))
2936                 return PQI_RAID_BYPASS_INELIGIBLE;
2937
2938         raid_map = device->raid_map;
2939
2940         rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2941         if (rc)
2942                 return PQI_RAID_BYPASS_INELIGIBLE;
2943
2944         if (device->raid_level == SA_RAID_1 ||
2945                 device->raid_level == SA_RAID_TRIPLE) {
2946                 if (rmd.is_write) {
2947                         pqi_calc_aio_r1_nexus(raid_map, &rmd);
2948                 } else {
2949                         group = device->next_bypass_group[rmd.map_index];
2950                         next_bypass_group = group + 1;
2951                         if (next_bypass_group >= rmd.layout_map_count)
2952                                 next_bypass_group = 0;
2953                         device->next_bypass_group[rmd.map_index] = next_bypass_group;
2954                         rmd.map_index += group * rmd.data_disks_per_row;
2955                 }
2956         } else if ((device->raid_level == SA_RAID_5 ||
2957                 device->raid_level == SA_RAID_6) &&
2958                 (rmd.layout_map_count > 1 || rmd.is_write)) {
2959                 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2960                 if (rc)
2961                         return PQI_RAID_BYPASS_INELIGIBLE;
2962         }
2963
2964         if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2965                 return PQI_RAID_BYPASS_INELIGIBLE;
2966
2967         rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2968         rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2969                 rmd.first_row * rmd.strip_size +
2970                 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2971         rmd.disk_block_cnt = rmd.block_cnt;
2972
2973         /* Handle differing logical/physical block sizes. */
2974         if (raid_map->phys_blk_shift) {
2975                 rmd.disk_block <<= raid_map->phys_blk_shift;
2976                 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2977         }
2978
2979         if (unlikely(rmd.disk_block_cnt > 0xffff))
2980                 return PQI_RAID_BYPASS_INELIGIBLE;
2981
2982         pqi_set_aio_cdb(&rmd);
2983
2984         if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2985                 if (rmd.data_length > device->max_transfer_encrypted)
2986                         return PQI_RAID_BYPASS_INELIGIBLE;
2987                 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2988                 encryption_info_ptr = &encryption_info;
2989         } else {
2990                 encryption_info_ptr = NULL;
2991         }
2992
2993         if (rmd.is_write) {
2994                 switch (device->raid_level) {
2995                 case SA_RAID_1:
2996                 case SA_RAID_TRIPLE:
2997                         return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
2998                                 encryption_info_ptr, device, &rmd);
2999                 case SA_RAID_5:
3000                 case SA_RAID_6:
3001                         return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3002                                 encryption_info_ptr, device, &rmd);
3003                 }
3004         }
3005
3006         return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3007                 rmd.cdb, rmd.cdb_length, queue_group,
3008                 encryption_info_ptr, true, false);
3009 }
3010
3011 #define PQI_STATUS_IDLE         0x0
3012
3013 #define PQI_CREATE_ADMIN_QUEUE_PAIR     1
3014 #define PQI_DELETE_ADMIN_QUEUE_PAIR     2
3015
3016 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET             0x0
3017 #define PQI_DEVICE_STATE_STATUS_AVAILABLE               0x1
3018 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY            0x2
3019 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY         0x3
3020 #define PQI_DEVICE_STATE_ERROR                          0x4
3021
3022 #define PQI_MODE_READY_TIMEOUT_SECS             30
3023 #define PQI_MODE_READY_POLL_INTERVAL_MSECS      1
3024
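     /*
      * Wait for the controller to come up in PQI mode: the PQI signature must
      * appear, the function and status code must report IDLE, and the device
      * status must reach "all registers ready".  All three waits share a single
      * PQI_MODE_READY_TIMEOUT_SECS deadline.
      */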
3025 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3026 {
3027         struct pqi_device_registers __iomem *pqi_registers;
3028         unsigned long timeout;
3029         u64 signature;
3030         u8 status;
3031
3032         pqi_registers = ctrl_info->pqi_registers;
3033         timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3034
3035         while (1) {
3036                 signature = readq(&pqi_registers->signature);
3037                 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3038                         sizeof(signature)) == 0)
3039                         break;
3040                 if (time_after(jiffies, timeout)) {
3041                         dev_err(&ctrl_info->pci_dev->dev,
3042                                 "timed out waiting for PQI signature\n");
3043                         return -ETIMEDOUT;
3044                 }
3045                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3046         }
3047
3048         while (1) {
3049                 status = readb(&pqi_registers->function_and_status_code);
3050                 if (status == PQI_STATUS_IDLE)
3051                         break;
3052                 if (time_after(jiffies, timeout)) {
3053                         dev_err(&ctrl_info->pci_dev->dev,
3054                                 "timed out waiting for PQI IDLE\n");
3055                         return -ETIMEDOUT;
3056                 }
3057                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3058         }
3059
3060         while (1) {
3061                 if (readl(&pqi_registers->device_status) ==
3062                         PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3063                         break;
3064                 if (time_after(jiffies, timeout)) {
3065                         dev_err(&ctrl_info->pci_dev->dev,
3066                                 "timed out waiting for PQI all registers ready\n");
3067                         return -ETIMEDOUT;
3068                 }
3069                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3070         }
3071
3072         return 0;
3073 }
3074
3075 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3076 {
3077         struct pqi_scsi_dev *device;
3078
3079         device = io_request->scmd->device->hostdata;
3080         device->raid_bypass_enabled = false;
3081         device->aio_enabled = false;
3082 }
3083
3084 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3085 {
3086         struct pqi_ctrl_info *ctrl_info;
3087         struct pqi_scsi_dev *device;
3088
3089         device = sdev->hostdata;
3090         if (device->device_offline)
3091                 return;
3092
3093         device->device_offline = true;
3094         ctrl_info = shost_to_hba(sdev->host);
3095         pqi_schedule_rescan_worker(ctrl_info);
3096         dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3097                 path, ctrl_info->scsi_host->host_no, device->bus,
3098                 device->target, device->lun);
3099 }
3100
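     /*
      * Decode a RAID path error response: set the residual count for underruns,
      * map the data_out_result to a host byte, copy any sense data into the SCSI
      * command, and take the device offline if the sense data reports a
      * "logical unit failure" (hardware error, ASC 0x3e, ASCQ 0x1).
      */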
3101 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3102 {
3103         u8 scsi_status;
3104         u8 host_byte;
3105         struct scsi_cmnd *scmd;
3106         struct pqi_raid_error_info *error_info;
3107         size_t sense_data_length;
3108         int residual_count;
3109         int xfer_count;
3110         struct scsi_sense_hdr sshdr;
3111
3112         scmd = io_request->scmd;
3113         if (!scmd)
3114                 return;
3115
3116         error_info = io_request->error_info;
3117         scsi_status = error_info->status;
3118         host_byte = DID_OK;
3119
3120         switch (error_info->data_out_result) {
3121         case PQI_DATA_IN_OUT_GOOD:
3122                 break;
3123         case PQI_DATA_IN_OUT_UNDERFLOW:
3124                 xfer_count =
3125                         get_unaligned_le32(&error_info->data_out_transferred);
3126                 residual_count = scsi_bufflen(scmd) - xfer_count;
3127                 scsi_set_resid(scmd, residual_count);
3128                 if (xfer_count < scmd->underflow)
3129                         host_byte = DID_SOFT_ERROR;
3130                 break;
3131         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3132         case PQI_DATA_IN_OUT_ABORTED:
3133                 host_byte = DID_ABORT;
3134                 break;
3135         case PQI_DATA_IN_OUT_TIMEOUT:
3136                 host_byte = DID_TIME_OUT;
3137                 break;
3138         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3139         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3140         case PQI_DATA_IN_OUT_BUFFER_ERROR:
3141         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3142         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3143         case PQI_DATA_IN_OUT_ERROR:
3144         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3145         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3146         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3147         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3148         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3149         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3150         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3151         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3152         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3153         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3154         default:
3155                 host_byte = DID_ERROR;
3156                 break;
3157         }
3158
3159         sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3160         if (sense_data_length == 0)
3161                 sense_data_length =
3162                         get_unaligned_le16(&error_info->response_data_length);
3163         if (sense_data_length) {
3164                 if (sense_data_length > sizeof(error_info->data))
3165                         sense_data_length = sizeof(error_info->data);
3166
3167                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3168                         scsi_normalize_sense(error_info->data,
3169                                 sense_data_length, &sshdr) &&
3170                                 sshdr.sense_key == HARDWARE_ERROR &&
3171                                 sshdr.asc == 0x3e) {
3172                         struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3173                         struct pqi_scsi_dev *device = scmd->device->hostdata;
3174
3175                         switch (sshdr.ascq) {
3176                         case 0x1: /* LOGICAL UNIT FAILURE */
3177                                 if (printk_ratelimit())
3178                                         scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3179                                                 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3180                                 pqi_take_device_offline(scmd->device, "RAID");
3181                                 host_byte = DID_NO_CONNECT;
3182                                 break;
3183
3184                         default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3185                                 if (printk_ratelimit())
3186                                         scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3187                                                 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3188                                 break;
3189                         }
3190                 }
3191
3192                 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3193                         sense_data_length = SCSI_SENSE_BUFFERSIZE;
3194                 memcpy(scmd->sense_buffer, error_info->data,
3195                         sense_data_length);
3196         }
3197
3198         scmd->result = scsi_status;
3199         set_host_byte(scmd, host_byte);
3200 }
3201
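     /*
      * Decode an AIO path error response: map the service response and status to
      * SCSI and host status values, account for underruns, flag the request for
      * retry (-EAGAIN) when the AIO path is disabled on a single-path device,
      * and take the device offline when a non-bypass request finds no path to it.
      */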
3202 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3203 {
3204         u8 scsi_status;
3205         u8 host_byte;
3206         struct scsi_cmnd *scmd;
3207         struct pqi_aio_error_info *error_info;
3208         size_t sense_data_length;
3209         int residual_count;
3210         int xfer_count;
3211         bool device_offline;
3212         struct pqi_scsi_dev *device;
3213
3214         scmd = io_request->scmd;
3215         error_info = io_request->error_info;
3216         host_byte = DID_OK;
3217         sense_data_length = 0;
3218         device_offline = false;
3219         device = scmd->device->hostdata;
3220
3221         switch (error_info->service_response) {
3222         case PQI_AIO_SERV_RESPONSE_COMPLETE:
3223                 scsi_status = error_info->status;
3224                 break;
3225         case PQI_AIO_SERV_RESPONSE_FAILURE:
3226                 switch (error_info->status) {
3227                 case PQI_AIO_STATUS_IO_ABORTED:
3228                         scsi_status = SAM_STAT_TASK_ABORTED;
3229                         break;
3230                 case PQI_AIO_STATUS_UNDERRUN:
3231                         scsi_status = SAM_STAT_GOOD;
3232                         residual_count = get_unaligned_le32(
3233                                                 &error_info->residual_count);
3234                         scsi_set_resid(scmd, residual_count);
3235                         xfer_count = scsi_bufflen(scmd) - residual_count;
3236                         if (xfer_count < scmd->underflow)
3237                                 host_byte = DID_SOFT_ERROR;
3238                         break;
3239                 case PQI_AIO_STATUS_OVERRUN:
3240                         scsi_status = SAM_STAT_GOOD;
3241                         break;
3242                 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3243                         pqi_aio_path_disabled(io_request);
3244                         if (pqi_is_multipath_device(device)) {
3245                                 pqi_device_remove_start(device);
3246                                 host_byte = DID_NO_CONNECT;
3247                                 scsi_status = SAM_STAT_CHECK_CONDITION;
3248                         } else {
3249                                 scsi_status = SAM_STAT_GOOD;
3250                                 io_request->status = -EAGAIN;
3251                         }
3252                         break;
3253                 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3254                 case PQI_AIO_STATUS_INVALID_DEVICE:
3255                         if (!io_request->raid_bypass) {
3256                                 device_offline = true;
3257                                 pqi_take_device_offline(scmd->device, "AIO");
3258                                 host_byte = DID_NO_CONNECT;
3259                         }
3260                         scsi_status = SAM_STAT_CHECK_CONDITION;
3261                         break;
3262                 case PQI_AIO_STATUS_IO_ERROR:
3263                 default:
3264                         scsi_status = SAM_STAT_CHECK_CONDITION;
3265                         break;
3266                 }
3267                 break;
3268         case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3269         case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3270                 scsi_status = SAM_STAT_GOOD;
3271                 break;
3272         case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3273         case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3274         default:
3275                 scsi_status = SAM_STAT_CHECK_CONDITION;
3276                 break;
3277         }
3278
3279         if (error_info->data_present) {
3280                 sense_data_length =
3281                         get_unaligned_le16(&error_info->data_length);
3282                 if (sense_data_length) {
3283                         if (sense_data_length > sizeof(error_info->data))
3284                                 sense_data_length = sizeof(error_info->data);
3285                         if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3286                                 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3287                         memcpy(scmd->sense_buffer, error_info->data,
3288                                 sense_data_length);
3289                 }
3290         }
3291
3292         if (device_offline && sense_data_length == 0)
3293                 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3294
3295         scmd->result = scsi_status;
3296         set_host_byte(scmd, host_byte);
3297 }
3298
3299 static void pqi_process_io_error(unsigned int iu_type,
3300         struct pqi_io_request *io_request)
3301 {
3302         switch (iu_type) {
3303         case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3304                 pqi_process_raid_io_error(io_request);
3305                 break;
3306         case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3307                 pqi_process_aio_io_error(io_request);
3308                 break;
3309         }
3310 }
3311
3312 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3313         struct pqi_task_management_response *response)
3314 {
3315         int rc;
3316
3317         switch (response->response_code) {
3318         case SOP_TMF_COMPLETE:
3319         case SOP_TMF_FUNCTION_SUCCEEDED:
3320                 rc = 0;
3321                 break;
3322         case SOP_TMF_REJECTED:
3323                 rc = -EAGAIN;
3324                 break;
3325         default:
3326                 rc = -EIO;
3327                 break;
3328         }
3329
3330         if (rc)
3331                 dev_err(&ctrl_info->pci_dev->dev,
3332                         "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3333
3334         return rc;
3335 }
3336
3337 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3338         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3339 {
3340         pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3341 }
3342
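     /*
      * Drain this queue group's outbound (response) queue: validate each response
      * element, hand it to the matching I/O request's completion callback, and
      * advance the consumer index.  Returns the number of responses handled, or
      * -1 after taking the controller offline if an invalid response was seen.
      */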
3343 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3344 {
3345         int num_responses;
3346         pqi_index_t oq_pi;
3347         pqi_index_t oq_ci;
3348         struct pqi_io_request *io_request;
3349         struct pqi_io_response *response;
3350         u16 request_id;
3351
3352         num_responses = 0;
3353         oq_ci = queue_group->oq_ci_copy;
3354
3355         while (1) {
3356                 oq_pi = readl(queue_group->oq_pi);
3357                 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3358                         pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3359                         dev_err(&ctrl_info->pci_dev->dev,
3360                                 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3361                                 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3362                         return -1;
3363                 }
3364                 if (oq_pi == oq_ci)
3365                         break;
3366
3367                 num_responses++;
3368                 response = queue_group->oq_element_array +
3369                         (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3370
3371                 request_id = get_unaligned_le16(&response->request_id);
3372                 if (request_id >= ctrl_info->max_io_slots) {
3373                         pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3374                         dev_err(&ctrl_info->pci_dev->dev,
3375                                 "request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
3376                                 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3377                         return -1;
3378                 }
3379
3380                 io_request = &ctrl_info->io_request_pool[request_id];
3381                 if (atomic_read(&io_request->refcount) == 0) {
3382                         pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3383                         dev_err(&ctrl_info->pci_dev->dev,
3384                                 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
3385                                 request_id, oq_pi, oq_ci);
3386                         return -1;
3387                 }
3388
3389                 switch (response->header.iu_type) {
3390                 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3391                 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3392                         if (io_request->scmd)
3393                                 io_request->scmd->result = 0;
3394                         fallthrough;
3395                 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3396                         break;
3397                 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3398                         io_request->status =
3399                                 get_unaligned_le16(
3400                                 &((struct pqi_vendor_general_response *)response)->status);
3401                         break;
3402                 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3403                         io_request->status = pqi_interpret_task_management_response(ctrl_info,
3404                                 (void *)response);
3405                         break;
3406                 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3407                         pqi_aio_path_disabled(io_request);
3408                         io_request->status = -EAGAIN;
3409                         break;
3410                 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3411                 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3412                         io_request->error_info = ctrl_info->error_buffer +
3413                                 (get_unaligned_le16(&response->error_index) *
3414                                 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3415                         pqi_process_io_error(response->header.iu_type, io_request);
3416                         break;
3417                 default:
3418                         pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3419                         dev_err(&ctrl_info->pci_dev->dev,
3420                                 "unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
3421                                 response->header.iu_type, oq_pi, oq_ci);
3422                         return -1;
3423                 }
3424
3425                 io_request->io_complete_callback(io_request, io_request->context);
3426
3427                 /*
3428                  * Note that the I/O request structure CANNOT BE TOUCHED after
3429                  * returning from the I/O completion callback!
3430                  */
3431                 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3432         }
3433
3434         if (num_responses) {
3435                 queue_group->oq_ci_copy = oq_ci;
3436                 writel(oq_ci, queue_group->oq_ci);
3437         }
3438
3439         return num_responses;
3440 }
3441
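     /*
      * Number of free elements in a circular queue with producer index pi and
      * consumer index ci.  One element is always left unused so that a full
      * queue can be distinguished from an empty one; e.g. with pi == ci the
      * queue is empty and elements_in_queue - 1 slots are free.
      */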
3442 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3443         unsigned int ci, unsigned int elements_in_queue)
3444 {
3445         unsigned int num_elements_used;
3446
3447         if (pi >= ci)
3448                 num_elements_used = pi - ci;
3449         else
3450                 num_elements_used = elements_in_queue - ci + pi;
3451
3452         return elements_in_queue - num_elements_used - 1;
3453 }
3454
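     /*
      * Post an event acknowledgement IU on the RAID path of the default queue
      * group.  Spins (releasing the submit lock between attempts) until an
      * inbound queue element is free, giving up only if the controller goes
      * offline.
      */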
3455 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3456         struct pqi_event_acknowledge_request *iu, size_t iu_length)
3457 {
3458         pqi_index_t iq_pi;
3459         pqi_index_t iq_ci;
3460         unsigned long flags;
3461         void *next_element;
3462         struct pqi_queue_group *queue_group;
3463
3464         queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3465         put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3466
3467         while (1) {
3468                 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3469
3470                 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3471                 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3472
3473                 if (pqi_num_elements_free(iq_pi, iq_ci,
3474                         ctrl_info->num_elements_per_iq))
3475                         break;
3476
3477                 spin_unlock_irqrestore(
3478                         &queue_group->submit_lock[RAID_PATH], flags);
3479
3480                 if (pqi_ctrl_offline(ctrl_info))
3481                         return;
3482         }
3483
3484         next_element = queue_group->iq_element_array[RAID_PATH] +
3485                 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3486
3487         memcpy(next_element, iu, iu_length);
3488
3489         iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3490         queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3491
3492         /*
3493          * This write notifies the controller that an IU is available to be
3494          * processed.
3495          */
3496         writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3497
3498         spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3499 }
3500
3501 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3502         struct pqi_event *event)
3503 {
3504         struct pqi_event_acknowledge_request request;
3505
3506         memset(&request, 0, sizeof(request));
3507
3508         request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3509         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3510                 &request.header.iu_length);
3511         request.event_type = event->event_type;
3512         put_unaligned_le16(event->event_id, &request.event_id);
3513         put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3514
3515         pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3516 }
3517
3518 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS              30
3519 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS        1
3520
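     /*
      * Poll the soft reset status during Online Firmware Activation and report
      * whether the driver should initiate the reset, firmware aborted it,
      * firmware stopped responding, or the wait timed out.
      */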
3521 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3522         struct pqi_ctrl_info *ctrl_info)
3523 {
3524         u8 status;
3525         unsigned long timeout;
3526
3527         timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3528
3529         while (1) {
3530                 status = pqi_read_soft_reset_status(ctrl_info);
3531                 if (status & PQI_SOFT_RESET_INITIATE)
3532                         return RESET_INITIATE_DRIVER;
3533
3534                 if (status & PQI_SOFT_RESET_ABORT)
3535                         return RESET_ABORT;
3536
3537                 if (!sis_is_firmware_running(ctrl_info))
3538                         return RESET_NORESPONSE;
3539
3540                 if (time_after(jiffies, timeout)) {
3541                         dev_warn(&ctrl_info->pci_dev->dev,
3542                                 "timed out waiting for soft reset status\n");
3543                         return RESET_TIMEDOUT;
3544                 }
3545
3546                 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3547         }
3548 }
3549
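     /*
      * Complete an Online Firmware Activation based on the soft reset status:
      * reset and restart the controller (re-establishing PQI mode) for the
      * driver- and firmware-initiated cases, release the OFA host buffer on
      * abort, and take the controller offline if firmware did not respond or
      * the status is unrecognized.
      */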
3550 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3551 {
3552         int rc;
3553         unsigned int delay_secs;
3554         enum pqi_soft_reset_status reset_status;
3555
3556         if (ctrl_info->soft_reset_handshake_supported)
3557                 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3558         else
3559                 reset_status = RESET_INITIATE_FIRMWARE;
3560
3561         delay_secs = PQI_POST_RESET_DELAY_SECS;
3562
3563         switch (reset_status) {
3564         case RESET_TIMEDOUT:
3565                 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3566                 fallthrough;
3567         case RESET_INITIATE_DRIVER:
3568                 dev_info(&ctrl_info->pci_dev->dev,
3569                                 "Online Firmware Activation: resetting controller\n");
3570                 sis_soft_reset(ctrl_info);
3571                 fallthrough;
3572         case RESET_INITIATE_FIRMWARE:
3573                 ctrl_info->pqi_mode_enabled = false;
3574                 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3575                 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3576                 pqi_ofa_free_host_buffer(ctrl_info);
3577                 pqi_ctrl_ofa_done(ctrl_info);
3578                 dev_info(&ctrl_info->pci_dev->dev,
3579                                 "Online Firmware Activation: %s\n",
3580                                 rc == 0 ? "SUCCESS" : "FAILED");
3581                 break;
3582         case RESET_ABORT:
3583                 dev_info(&ctrl_info->pci_dev->dev,
3584                                 "Online Firmware Activation ABORTED\n");
3585                 if (ctrl_info->soft_reset_handshake_supported)
3586                         pqi_clear_soft_reset_status(ctrl_info);
3587                 pqi_ofa_free_host_buffer(ctrl_info);
3588                 pqi_ctrl_ofa_done(ctrl_info);
3589                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3590                 break;
3591         case RESET_NORESPONSE:
3592                 fallthrough;
3593         default:
3594                 dev_err(&ctrl_info->pci_dev->dev,
3595                         "unexpected Online Firmware Activation reset status: 0x%x\n",
3596                         reset_status);
3597                 pqi_ofa_free_host_buffer(ctrl_info);
3598                 pqi_ctrl_ofa_done(ctrl_info);
3599                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3600                 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3601                 break;
3602         }
3603 }
3604
3605 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3606 {
3607         struct pqi_ctrl_info *ctrl_info;
3608
3609         ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3610
3611         pqi_ctrl_ofa_start(ctrl_info);
3612         pqi_ofa_setup_host_buffer(ctrl_info);
3613         pqi_ofa_host_memory_update(ctrl_info);
3614 }
3615
3616 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3617 {
3618         struct pqi_ctrl_info *ctrl_info;
3619         struct pqi_event *event;
3620
3621         ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3622
3623         event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3624
3625         pqi_ofa_ctrl_quiesce(ctrl_info);
3626         pqi_acknowledge_event(ctrl_info, event);
3627         pqi_process_soft_reset(ctrl_info);
3628 }
3629
3630 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3631         struct pqi_event *event)
3632 {
3633         bool ack_event;
3634
3635         ack_event = true;
3636
3637         switch (event->event_id) {
3638         case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3639                 dev_info(&ctrl_info->pci_dev->dev,
3640                         "received Online Firmware Activation memory allocation request\n");
3641                 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3642                 break;
3643         case PQI_EVENT_OFA_QUIESCE:
3644                 dev_info(&ctrl_info->pci_dev->dev,
3645                         "received Online Firmware Activation quiesce request\n");
3646                 schedule_work(&ctrl_info->ofa_quiesce_work);
3647                 ack_event = false;
3648                 break;
3649         case PQI_EVENT_OFA_CANCELED:
3650                 dev_info(&ctrl_info->pci_dev->dev,
3651                         "received Online Firmware Activation cancel request: reason: %u\n",
3652                         ctrl_info->ofa_cancel_reason);
3653                 pqi_ofa_free_host_buffer(ctrl_info);
3654                 pqi_ctrl_ofa_done(ctrl_info);
3655                 break;
3656         default:
3657                 dev_err(&ctrl_info->pci_dev->dev,
3658                         "received unknown Online Firmware Activation request: event ID: %u\n",
3659                         event->event_id);
3660                 break;
3661         }
3662
3663         return ack_event;
3664 }
3665
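     /*
      * Worker scheduled by pqi_process_event_intr(): acknowledges each pending
      * event, routes OFA events to pqi_ofa_process_event(), and schedules a
      * delayed rescan when any other event type was reported.
      */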
3666 static void pqi_event_worker(struct work_struct *work)
3667 {
3668         unsigned int i;
3669         bool rescan_needed;
3670         struct pqi_ctrl_info *ctrl_info;
3671         struct pqi_event *event;
3672         bool ack_event;
3673
3674         ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3675
3676         pqi_ctrl_busy(ctrl_info);
3677         pqi_wait_if_ctrl_blocked(ctrl_info);
3678         if (pqi_ctrl_offline(ctrl_info))
3679                 goto out;
3680
3681         rescan_needed = false;
3682         event = ctrl_info->events;
3683         for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3684                 if (event->pending) {
3685                         event->pending = false;
3686                         if (event->event_type == PQI_EVENT_TYPE_OFA) {
3687                                 ack_event = pqi_ofa_process_event(ctrl_info, event);
3688                         } else {
3689                                 ack_event = true;
3690                                 rescan_needed = true;
3691                                 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3692                                         ctrl_info->logical_volume_rescan_needed = true;
3693                         }
3694                         if (ack_event)
3695                                 pqi_acknowledge_event(ctrl_info, event);
3696                 }
3697                 event++;
3698         }
3699
3700         if (rescan_needed)
3701                 pqi_schedule_rescan_worker_delayed(ctrl_info);
3702
3703 out:
3704         pqi_ctrl_unbusy(ctrl_info);
3705 }
3706
3707 #define PQI_HEARTBEAT_TIMER_INTERVAL    (10 * HZ)
3708
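     /*
      * Runs every PQI_HEARTBEAT_TIMER_INTERVAL.  If no interrupts have arrived
      * and the firmware heartbeat counter has not advanced since the previous
      * check, the controller is assumed dead and is taken offline; otherwise
      * the timer re-arms itself.
      */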
3709 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3710 {
3711         int num_interrupts;
3712         u32 heartbeat_count;
3713         struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3714
3715         pqi_check_ctrl_health(ctrl_info);
3716         if (pqi_ctrl_offline(ctrl_info))
3717                 return;
3718
3719         num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3720         heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3721
3722         if (num_interrupts == ctrl_info->previous_num_interrupts) {
3723                 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3724                         dev_err(&ctrl_info->pci_dev->dev,
3725                                 "no heartbeat detected - last heartbeat count: %u\n",
3726                                 heartbeat_count);
3727                         pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3728                         return;
3729                 }
3730         } else {
3731                 ctrl_info->previous_num_interrupts = num_interrupts;
3732         }
3733
3734         ctrl_info->previous_heartbeat_count = heartbeat_count;
3735         mod_timer(&ctrl_info->heartbeat_timer,
3736                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3737 }
3738
3739 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3740 {
3741         if (!ctrl_info->heartbeat_counter)
3742                 return;
3743
3744         ctrl_info->previous_num_interrupts =
3745                 atomic_read(&ctrl_info->num_interrupts);
3746         ctrl_info->previous_heartbeat_count =
3747                 pqi_read_heartbeat_counter(ctrl_info);
3748
3749         ctrl_info->heartbeat_timer.expires =
3750                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3751         add_timer(&ctrl_info->heartbeat_timer);
3752 }
3753
3754 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3755 {
3756         del_timer_sync(&ctrl_info->heartbeat_timer);
3757 }
3758
3759 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3760         struct pqi_event *event, struct pqi_event_response *response)
3761 {
3762         switch (event->event_id) {
3763         case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3764                 ctrl_info->ofa_bytes_requested =
3765                         get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3766                 break;
3767         case PQI_EVENT_OFA_CANCELED:
3768                 ctrl_info->ofa_cancel_reason =
3769                         get_unaligned_le16(&response->data.ofa_cancelled.reason);
3770                 break;
3771         }
3772 }
3773
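     /*
      * Drain the event queue: record each event that requests acknowledgement in
      * ctrl_info->events[], capture the OFA event payload, advance the consumer
      * index, and schedule the event worker if any events were consumed.
      */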
3774 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3775 {
3776         int num_events;
3777         pqi_index_t oq_pi;
3778         pqi_index_t oq_ci;
3779         struct pqi_event_queue *event_queue;
3780         struct pqi_event_response *response;
3781         struct pqi_event *event;
3782         int event_index;
3783
3784         event_queue = &ctrl_info->event_queue;
3785         num_events = 0;
3786         oq_ci = event_queue->oq_ci_copy;
3787
3788         while (1) {
3789                 oq_pi = readl(event_queue->oq_pi);
3790                 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3791                         pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3792                         dev_err(&ctrl_info->pci_dev->dev,
3793                                 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3794                                 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3795                         return -1;
3796                 }
3797
3798                 if (oq_pi == oq_ci)
3799                         break;
3800
3801                 num_events++;
3802                 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3803
3804                 event_index = pqi_event_type_to_event_index(response->event_type);
3805
3806                 if (event_index >= 0 && response->request_acknowledge) {
3807                         event = &ctrl_info->events[event_index];
3808                         event->pending = true;
3809                         event->event_type = response->event_type;
3810                         event->event_id = get_unaligned_le16(&response->event_id);
3811                         event->additional_event_id =
3812                                 get_unaligned_le32(&response->additional_event_id);
3813                         if (event->event_type == PQI_EVENT_TYPE_OFA)
3814                                 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3815                 }
3816
3817                 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3818         }
3819
3820         if (num_events) {
3821                 event_queue->oq_ci_copy = oq_ci;
3822                 writel(oq_ci, event_queue->oq_ci);
3823                 schedule_work(&ctrl_info->event_work);
3824         }
3825
3826         return num_events;
3827 }
3828
3829 #define PQI_LEGACY_INTX_MASK    0x1
3830
3831 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3832 {
3833         u32 intx_mask;
3834         struct pqi_device_registers __iomem *pqi_registers;
3835         volatile void __iomem *register_addr;
3836
3837         pqi_registers = ctrl_info->pqi_registers;
3838
3839         if (enable_intx)
3840                 register_addr = &pqi_registers->legacy_intx_mask_clear;
3841         else
3842                 register_addr = &pqi_registers->legacy_intx_mask_set;
3843
3844         intx_mask = readl(register_addr);
3845         intx_mask |= PQI_LEGACY_INTX_MASK;
3846         writel(intx_mask, register_addr);
3847 }
3848
3849 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3850         enum pqi_irq_mode new_mode)
3851 {
3852         switch (ctrl_info->irq_mode) {
3853         case IRQ_MODE_MSIX:
3854                 switch (new_mode) {
3855                 case IRQ_MODE_MSIX:
3856                         break;
3857                 case IRQ_MODE_INTX:
3858                         pqi_configure_legacy_intx(ctrl_info, true);
3859                         sis_enable_intx(ctrl_info);
3860                         break;
3861                 case IRQ_MODE_NONE:
3862                         break;
3863                 }
3864                 break;
3865         case IRQ_MODE_INTX:
3866                 switch (new_mode) {
3867                 case IRQ_MODE_MSIX:
3868                         pqi_configure_legacy_intx(ctrl_info, false);
3869                         sis_enable_msix(ctrl_info);
3870                         break;
3871                 case IRQ_MODE_INTX:
3872                         break;
3873                 case IRQ_MODE_NONE:
3874                         pqi_configure_legacy_intx(ctrl_info, false);
3875                         break;
3876                 }
3877                 break;
3878         case IRQ_MODE_NONE:
3879                 switch (new_mode) {
3880                 case IRQ_MODE_MSIX:
3881                         sis_enable_msix(ctrl_info);
3882                         break;
3883                 case IRQ_MODE_INTX:
3884                         pqi_configure_legacy_intx(ctrl_info, true);
3885                         sis_enable_intx(ctrl_info);
3886                         break;
3887                 case IRQ_MODE_NONE:
3888                         break;
3889                 }
3890                 break;
3891         }
3892
3893         ctrl_info->irq_mode = new_mode;
3894 }
3895
3896 #define PQI_LEGACY_INTX_PENDING         0x1
3897
3898 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3899 {
3900         bool valid_irq;
3901         u32 intx_status;
3902
3903         switch (ctrl_info->irq_mode) {
3904         case IRQ_MODE_MSIX:
3905                 valid_irq = true;
3906                 break;
3907         case IRQ_MODE_INTX:
3908                 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3909                 if (intx_status & PQI_LEGACY_INTX_PENDING)
3910                         valid_irq = true;
3911                 else
3912                         valid_irq = false;
3913                 break;
3914         case IRQ_MODE_NONE:
3915         default:
3916                 valid_irq = false;
3917                 break;
3918         }
3919
3920         return valid_irq;
3921 }
3922
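     /*
      * Per-queue-group interrupt handler: drain I/O responses for this queue
      * group, drain the event queue on the vector it shares with the first
      * queue group, and then start any I/O queued on the RAID and AIO paths.
      */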
3923 static irqreturn_t pqi_irq_handler(int irq, void *data)
3924 {
3925         struct pqi_ctrl_info *ctrl_info;
3926         struct pqi_queue_group *queue_group;
3927         int num_io_responses_handled;
3928         int num_events_handled;
3929
3930         queue_group = data;
3931         ctrl_info = queue_group->ctrl_info;
3932
3933         if (!pqi_is_valid_irq(ctrl_info))
3934                 return IRQ_NONE;
3935
3936         num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3937         if (num_io_responses_handled < 0)
3938                 goto out;
3939
3940         if (irq == ctrl_info->event_irq) {
3941                 num_events_handled = pqi_process_event_intr(ctrl_info);
3942                 if (num_events_handled < 0)
3943                         goto out;
3944         } else {
3945                 num_events_handled = 0;
3946         }
3947
3948         if (num_io_responses_handled + num_events_handled > 0)
3949                 atomic_inc(&ctrl_info->num_interrupts);
3950
3951         pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3952         pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3953
3954 out:
3955         return IRQ_HANDLED;
3956 }
3957
3958 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3959 {
3960         struct pci_dev *pci_dev = ctrl_info->pci_dev;
3961         int i;
3962         int rc;
3963
3964         ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3965
3966         for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3967                 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3968                         DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3969                 if (rc) {
3970                         dev_err(&pci_dev->dev,
3971                                 "irq %u init failed with error %d\n",
3972                                 pci_irq_vector(pci_dev, i), rc);
3973                         return rc;
3974                 }
3975                 ctrl_info->num_msix_vectors_initialized++;
3976         }
3977
3978         return 0;
3979 }
3980
3981 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3982 {
3983         int i;
3984
3985         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3986                 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3987                         &ctrl_info->queue_groups[i]);
3988
3989         ctrl_info->num_msix_vectors_initialized = 0;
3990 }
3991
3992 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3993 {
3994         int num_vectors_enabled;
3995
3996         num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3997                         PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3998                         PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3999         if (num_vectors_enabled < 0) {
4000                 dev_err(&ctrl_info->pci_dev->dev,
4001                         "MSI-X init failed with error %d\n",
4002                         num_vectors_enabled);
4003                 return num_vectors_enabled;
4004         }
4005
4006         ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4007         ctrl_info->irq_mode = IRQ_MODE_MSIX;
4008         return 0;
4009 }
4010
4011 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4012 {
4013         if (ctrl_info->num_msix_vectors_enabled) {
4014                 pci_free_irq_vectors(ctrl_info->pci_dev);
4015                 ctrl_info->num_msix_vectors_enabled = 0;
4016         }
4017 }
4018
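     /*
      * Compute the size of, and allocate, a single coherent DMA buffer large
      * enough to hold every inbound and outbound queue element array, the event
      * queue elements, and all of the queue index words (each aligned per the
      * PQI requirements), then parcel the aligned pieces out to the queue groups
      * and the event queue.
      */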
4019 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4020 {
4021         unsigned int i;
4022         size_t alloc_length;
4023         size_t element_array_length_per_iq;
4024         size_t element_array_length_per_oq;
4025         void *element_array;
4026         void __iomem *next_queue_index;
4027         void *aligned_pointer;
4028         unsigned int num_inbound_queues;
4029         unsigned int num_outbound_queues;
4030         unsigned int num_queue_indexes;
4031         struct pqi_queue_group *queue_group;
4032
4033         element_array_length_per_iq =
4034                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4035                 ctrl_info->num_elements_per_iq;
4036         element_array_length_per_oq =
4037                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4038                 ctrl_info->num_elements_per_oq;
4039         num_inbound_queues = ctrl_info->num_queue_groups * 2;
4040         num_outbound_queues = ctrl_info->num_queue_groups;
4041         num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4042
4043         aligned_pointer = NULL;
4044
4045         for (i = 0; i < num_inbound_queues; i++) {
4046                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4047                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4048                 aligned_pointer += element_array_length_per_iq;
4049         }
4050
4051         for (i = 0; i < num_outbound_queues; i++) {
4052                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4053                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4054                 aligned_pointer += element_array_length_per_oq;
4055         }
4056
4057         aligned_pointer = PTR_ALIGN(aligned_pointer,
4058                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4059         aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4060                 PQI_EVENT_OQ_ELEMENT_LENGTH;
4061
4062         for (i = 0; i < num_queue_indexes; i++) {
4063                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4064                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4065                 aligned_pointer += sizeof(pqi_index_t);
4066         }
4067
4068         alloc_length = (size_t)aligned_pointer +
4069                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4070
4071         alloc_length += PQI_EXTRA_SGL_MEMORY;
4072
4073         ctrl_info->queue_memory_base =
4074                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4075                                    &ctrl_info->queue_memory_base_dma_handle,
4076                                    GFP_KERNEL);
4077
4078         if (!ctrl_info->queue_memory_base)
4079                 return -ENOMEM;
4080
4081         ctrl_info->queue_memory_length = alloc_length;
4082
4083         element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4084                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4085
4086         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4087                 queue_group = &ctrl_info->queue_groups[i];
4088                 queue_group->iq_element_array[RAID_PATH] = element_array;
4089                 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4090                         ctrl_info->queue_memory_base_dma_handle +
4091                                 (element_array - ctrl_info->queue_memory_base);
4092                 element_array += element_array_length_per_iq;
4093                 element_array = PTR_ALIGN(element_array,
4094                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4095                 queue_group->iq_element_array[AIO_PATH] = element_array;
4096                 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4097                         ctrl_info->queue_memory_base_dma_handle +
4098                         (element_array - ctrl_info->queue_memory_base);
4099                 element_array += element_array_length_per_iq;
4100                 element_array = PTR_ALIGN(element_array,
4101                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4102         }
4103
4104         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4105                 queue_group = &ctrl_info->queue_groups[i];
4106                 queue_group->oq_element_array = element_array;
4107                 queue_group->oq_element_array_bus_addr =
4108                         ctrl_info->queue_memory_base_dma_handle +
4109                         (element_array - ctrl_info->queue_memory_base);
4110                 element_array += element_array_length_per_oq;
4111                 element_array = PTR_ALIGN(element_array,
4112                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4113         }
4114
4115         ctrl_info->event_queue.oq_element_array = element_array;
4116         ctrl_info->event_queue.oq_element_array_bus_addr =
4117                 ctrl_info->queue_memory_base_dma_handle +
4118                 (element_array - ctrl_info->queue_memory_base);
4119         element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4120                 PQI_EVENT_OQ_ELEMENT_LENGTH;
4121
4122         next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4123                 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4124
4125         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4126                 queue_group = &ctrl_info->queue_groups[i];
4127                 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4128                 queue_group->iq_ci_bus_addr[RAID_PATH] =
4129                         ctrl_info->queue_memory_base_dma_handle +
4130                         (next_queue_index -
4131                         (void __iomem *)ctrl_info->queue_memory_base);
4132                 next_queue_index += sizeof(pqi_index_t);
4133                 next_queue_index = PTR_ALIGN(next_queue_index,
4134                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4135                 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4136                 queue_group->iq_ci_bus_addr[AIO_PATH] =
4137                         ctrl_info->queue_memory_base_dma_handle +
4138                         (next_queue_index -
4139                         (void __iomem *)ctrl_info->queue_memory_base);
4140                 next_queue_index += sizeof(pqi_index_t);
4141                 next_queue_index = PTR_ALIGN(next_queue_index,
4142                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4143                 queue_group->oq_pi = next_queue_index;
4144                 queue_group->oq_pi_bus_addr =
4145                         ctrl_info->queue_memory_base_dma_handle +
4146                         (next_queue_index -
4147                         (void __iomem *)ctrl_info->queue_memory_base);
4148                 next_queue_index += sizeof(pqi_index_t);
4149                 next_queue_index = PTR_ALIGN(next_queue_index,
4150                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4151         }
4152
4153         ctrl_info->event_queue.oq_pi = next_queue_index;
4154         ctrl_info->event_queue.oq_pi_bus_addr =
4155                 ctrl_info->queue_memory_base_dma_handle +
4156                 (next_queue_index -
4157                 (void __iomem *)ctrl_info->queue_memory_base);
4158
4159         return 0;
4160 }
4161
4162 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4163 {
4164         unsigned int i;
4165         u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4166         u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4167
4168         /*
4169          * Initialize the backpointers to the controller structure in
4170          * each operational queue group structure.
4171          */
4172         for (i = 0; i < ctrl_info->num_queue_groups; i++)
4173                 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4174
4175         /*
4176          * Assign IDs to all operational queues.  Note that the IDs
4177          * assigned to operational IQs are independent of the IDs
4178          * assigned to operational OQs.
4179          */
4180         ctrl_info->event_queue.oq_id = next_oq_id++;
4181         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4182                 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4183                 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4184                 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4185         }
4186
4187         /*
4188          * Assign MSI-X table entry indexes to all queues.  Note that the
4189          * interrupt for the event queue is shared with the first queue group.
4190          */
4191         ctrl_info->event_queue.int_msg_num = 0;
4192         for (i = 0; i < ctrl_info->num_queue_groups; i++)
4193                 ctrl_info->queue_groups[i].int_msg_num = i;
4194
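             /*
              * Each queue group keeps one submission lock and one pending
              * request list per I/O path (RAID and AIO).
              */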
4195         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4196                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4197                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4198                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4199                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4200         }
4201 }
4202
4203 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4204 {
4205         size_t alloc_length;
4206         struct pqi_admin_queues_aligned *admin_queues_aligned;
4207         struct pqi_admin_queues *admin_queues;
4208
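             /*
              * Over-allocate by the element array alignment so that the admin
              * queue structures can be placed at an aligned address within
              * the coherent buffer.
              */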
4209         alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4210                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4211
4212         ctrl_info->admin_queue_memory_base =
4213                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4214                                    &ctrl_info->admin_queue_memory_base_dma_handle,
4215                                    GFP_KERNEL);
4216
4217         if (!ctrl_info->admin_queue_memory_base)
4218                 return -ENOMEM;
4219
4220         ctrl_info->admin_queue_memory_length = alloc_length;
4221
4222         admin_queues = &ctrl_info->admin_queues;
4223         admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4224                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4225         admin_queues->iq_element_array =
4226                 &admin_queues_aligned->iq_element_array;
4227         admin_queues->oq_element_array =
4228                 &admin_queues_aligned->oq_element_array;
4229         admin_queues->iq_ci =
4230                 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4231         admin_queues->oq_pi =
4232                 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4233
4234         admin_queues->iq_element_array_bus_addr =
4235                 ctrl_info->admin_queue_memory_base_dma_handle +
4236                 (admin_queues->iq_element_array -
4237                 ctrl_info->admin_queue_memory_base);
4238         admin_queues->oq_element_array_bus_addr =
4239                 ctrl_info->admin_queue_memory_base_dma_handle +
4240                 (admin_queues->oq_element_array -
4241                 ctrl_info->admin_queue_memory_base);
4242         admin_queues->iq_ci_bus_addr =
4243                 ctrl_info->admin_queue_memory_base_dma_handle +
4244                 ((void __iomem *)admin_queues->iq_ci -
4245                 (void __iomem *)ctrl_info->admin_queue_memory_base);
4246         admin_queues->oq_pi_bus_addr =
4247                 ctrl_info->admin_queue_memory_base_dma_handle +
4248                 ((void __iomem *)admin_queues->oq_pi -
4249                 (void __iomem *)ctrl_info->admin_queue_memory_base);
4250
4251         return 0;
4252 }
4253
4254 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES          HZ
4255 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS      1
4256
4257 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4258 {
4259         struct pqi_device_registers __iomem *pqi_registers;
4260         struct pqi_admin_queues *admin_queues;
4261         unsigned long timeout;
4262         u8 status;
4263         u32 reg;
4264
4265         pqi_registers = ctrl_info->pqi_registers;
4266         admin_queues = &ctrl_info->admin_queues;
4267
4268         writeq((u64)admin_queues->iq_element_array_bus_addr,
4269                 &pqi_registers->admin_iq_element_array_addr);
4270         writeq((u64)admin_queues->oq_element_array_bus_addr,
4271                 &pqi_registers->admin_oq_element_array_addr);
4272         writeq((u64)admin_queues->iq_ci_bus_addr,
4273                 &pqi_registers->admin_iq_ci_addr);
4274         writeq((u64)admin_queues->oq_pi_bus_addr,
4275                 &pqi_registers->admin_oq_pi_addr);
4276
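             /*
              * The admin IQ element count, admin OQ element count, and the
              * MSI-X message number for the admin OQ are packed into a single
              * register: IQ count in bits 7:0, OQ count in bits 15:8, and the
              * interrupt message number starting at bit 16.
              */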
4277         reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4278                 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4279                 (admin_queues->int_msg_num << 16);
4280         writel(reg, &pqi_registers->admin_iq_num_elements);
4281
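             /*
              * Ask the controller to create the admin queue pair, then poll
              * the function and status code register until the controller
              * returns to the idle state or the request times out.
              */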
4282         writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4283                 &pqi_registers->function_and_status_code);
4284
4285         timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4286         while (1) {
4287                 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4288                 status = readb(&pqi_registers->function_and_status_code);
4289                 if (status == PQI_STATUS_IDLE)
4290                         break;
4291                 if (time_after(jiffies, timeout))
4292                         return -ETIMEDOUT;
4293         }
4294
4295         /*
4296          * The offset registers are not initialized to the correct
4297          * offsets until *after* the create admin queue pair command
4298          * completes successfully.
4299          */
4300         admin_queues->iq_pi = ctrl_info->iomem_base +
4301                 PQI_DEVICE_REGISTERS_OFFSET +
4302                 readq(&pqi_registers->admin_iq_pi_offset);
4303         admin_queues->oq_ci = ctrl_info->iomem_base +
4304                 PQI_DEVICE_REGISTERS_OFFSET +
4305                 readq(&pqi_registers->admin_oq_ci_offset);
4306
4307         return 0;
4308 }
4309
4310 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4311         struct pqi_general_admin_request *request)
4312 {
4313         struct pqi_admin_queues *admin_queues;
4314         void *next_element;
4315         pqi_index_t iq_pi;
4316
4317         admin_queues = &ctrl_info->admin_queues;
4318         iq_pi = admin_queues->iq_pi_copy;
4319
4320         next_element = admin_queues->iq_element_array +
4321                 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4322
4323         memcpy(next_element, request, sizeof(*request));
4324
4325         iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4326         admin_queues->iq_pi_copy = iq_pi;
4327
4328         /*
4329          * This write notifies the controller that an IU is available to be
4330          * processed.
4331          */
4332         writel(iq_pi, admin_queues->iq_pi);
4333 }
4334
4335 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS  60
4336
4337 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4338         struct pqi_general_admin_response *response)
4339 {
4340         struct pqi_admin_queues *admin_queues;
4341         pqi_index_t oq_pi;
4342         pqi_index_t oq_ci;
4343         unsigned long timeout;
4344
4345         admin_queues = &ctrl_info->admin_queues;
4346         oq_ci = admin_queues->oq_ci_copy;
4347
4348         timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4349
4350         while (1) {
4351                 oq_pi = readl(admin_queues->oq_pi);
4352                 if (oq_pi != oq_ci)
4353                         break;
4354                 if (time_after(jiffies, timeout)) {
4355                         dev_err(&ctrl_info->pci_dev->dev,
4356                                 "timed out waiting for admin response\n");
4357                         return -ETIMEDOUT;
4358                 }
4359                 if (!sis_is_firmware_running(ctrl_info))
4360                         return -ENXIO;
4361                 usleep_range(1000, 2000);
4362         }
4363
4364         memcpy(response, admin_queues->oq_element_array +
4365                 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4366
4367         oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4368         admin_queues->oq_ci_copy = oq_ci;
4369         writel(oq_ci, admin_queues->oq_ci);
4370
4371         return 0;
4372 }
4373
4374 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4375         struct pqi_queue_group *queue_group, enum pqi_io_path path,
4376         struct pqi_io_request *io_request)
4377 {
4378         struct pqi_io_request *next;
4379         void *next_element;
4380         pqi_index_t iq_pi;
4381         pqi_index_t iq_ci;
4382         size_t iu_length;
4383         unsigned long flags;
4384         unsigned int num_elements_needed;
4385         unsigned int num_elements_to_end_of_queue;
4386         size_t copy_count;
4387         struct pqi_iu_header *request;
4388
4389         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4390
4391         if (io_request) {
4392                 io_request->queue_group = queue_group;
4393                 list_add_tail(&io_request->request_list_entry,
4394                         &queue_group->request_list[path]);
4395         }
4396
4397         iq_pi = queue_group->iq_pi_copy[path];
4398
4399         list_for_each_entry_safe(io_request, next,
4400                 &queue_group->request_list[path], request_list_entry) {
4401
4402                 request = io_request->iu;
4403
4404                 iu_length = get_unaligned_le16(&request->iu_length) +
4405                         PQI_REQUEST_HEADER_LENGTH;
4406                 num_elements_needed =
4407                         DIV_ROUND_UP(iu_length,
4408                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4409
4410                 iq_ci = readl(queue_group->iq_ci[path]);
4411
4412                 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4413                         ctrl_info->num_elements_per_iq))
4414                         break;
4415
4416                 put_unaligned_le16(queue_group->oq_id,
4417                         &request->response_queue_id);
4418
4419                 next_element = queue_group->iq_element_array[path] +
4420                         (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4421
4422                 num_elements_to_end_of_queue =
4423                         ctrl_info->num_elements_per_iq - iq_pi;
4424
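                     /*
                      * Copy the IU into the element array, wrapping around to
                      * the beginning of the circular array if it does not fit
                      * contiguously before the end.
                      */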
4425                 if (num_elements_needed <= num_elements_to_end_of_queue) {
4426                         memcpy(next_element, request, iu_length);
4427                 } else {
4428                         copy_count = num_elements_to_end_of_queue *
4429                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4430                         memcpy(next_element, request, copy_count);
4431                         memcpy(queue_group->iq_element_array[path],
4432                                 (u8 *)request + copy_count,
4433                                 iu_length - copy_count);
4434                 }
4435
4436                 iq_pi = (iq_pi + num_elements_needed) %
4437                         ctrl_info->num_elements_per_iq;
4438
4439                 list_del(&io_request->request_list_entry);
4440         }
4441
4442         if (iq_pi != queue_group->iq_pi_copy[path]) {
4443                 queue_group->iq_pi_copy[path] = iq_pi;
4444                 /*
4445                  * This write notifies the controller that one or more IUs are
4446                  * available to be processed.
4447                  */
4448                 writel(iq_pi, queue_group->iq_pi[path]);
4449         }
4450
4451         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4452 }
4453
4454 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS         10
4455
4456 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4457         struct completion *wait)
4458 {
4459         int rc;
4460
4461         while (1) {
4462                 if (wait_for_completion_io_timeout(wait,
4463                         PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4464                         rc = 0;
4465                         break;
4466                 }
4467
4468                 pqi_check_ctrl_health(ctrl_info);
4469                 if (pqi_ctrl_offline(ctrl_info)) {
4470                         rc = -ENXIO;
4471                         break;
4472                 }
4473         }
4474
4475         return rc;
4476 }
4477
4478 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4479         void *context)
4480 {
4481         struct completion *waiting = context;
4482
4483         complete(waiting);
4484 }
4485
4486 static int pqi_process_raid_io_error_synchronous(
4487         struct pqi_raid_error_info *error_info)
4488 {
4489         int rc = -EIO;
4490
4491         switch (error_info->data_out_result) {
4492         case PQI_DATA_IN_OUT_GOOD:
4493                 if (error_info->status == SAM_STAT_GOOD)
4494                         rc = 0;
4495                 break;
4496         case PQI_DATA_IN_OUT_UNDERFLOW:
4497                 if (error_info->status == SAM_STAT_GOOD ||
4498                         error_info->status == SAM_STAT_CHECK_CONDITION)
4499                         rc = 0;
4500                 break;
4501         case PQI_DATA_IN_OUT_ABORTED:
4502                 rc = PQI_CMD_STATUS_ABORTED;
4503                 break;
4504         }
4505
4506         return rc;
4507 }
4508
4509 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4510 {
4511         return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4512 }
4513
4514 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4515         struct pqi_iu_header *request, unsigned int flags,
4516         struct pqi_raid_error_info *error_info)
4517 {
4518         int rc = 0;
4519         struct pqi_io_request *io_request;
4520         size_t iu_length;
4521         DECLARE_COMPLETION_ONSTACK(wait);
4522
4523         if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4524                 if (down_interruptible(&ctrl_info->sync_request_sem))
4525                         return -ERESTARTSYS;
4526         } else {
4527                 down(&ctrl_info->sync_request_sem);
4528         }
4529
4530         pqi_ctrl_busy(ctrl_info);
4531         /*
4532          * Wait for other admin queue updates such as:
4533          * config table changes, OFA memory updates, ...
4534          */
4535         if (pqi_is_blockable_request(request))
4536                 pqi_wait_if_ctrl_blocked(ctrl_info);
4537
4538         if (pqi_ctrl_offline(ctrl_info)) {
4539                 rc = -ENXIO;
4540                 goto out;
4541         }
4542
4543         io_request = pqi_alloc_io_request(ctrl_info);
4544
4545         put_unaligned_le16(io_request->index,
4546                 &(((struct pqi_raid_path_request *)request)->request_id));
4547
4548         if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4549                 ((struct pqi_raid_path_request *)request)->error_index =
4550                         ((struct pqi_raid_path_request *)request)->request_id;
4551
4552         iu_length = get_unaligned_le16(&request->iu_length) +
4553                 PQI_REQUEST_HEADER_LENGTH;
4554         memcpy(io_request->iu, request, iu_length);
4555
4556         io_request->io_complete_callback = pqi_raid_synchronous_complete;
4557         io_request->context = &wait;
4558
4559         pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4560                 io_request);
4561
4562         pqi_wait_for_completion_io(ctrl_info, &wait);
4563
4564         if (error_info) {
4565                 if (io_request->error_info)
4566                         memcpy(error_info, io_request->error_info, sizeof(*error_info));
4567                 else
4568                         memset(error_info, 0, sizeof(*error_info));
4569         } else if (rc == 0 && io_request->error_info) {
4570                 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4571         }
4572
4573         pqi_free_io_request(io_request);
4574
4575 out:
4576         pqi_ctrl_unbusy(ctrl_info);
4577         up(&ctrl_info->sync_request_sem);
4578
4579         return rc;
4580 }
4581
4582 static int pqi_validate_admin_response(
4583         struct pqi_general_admin_response *response, u8 expected_function_code)
4584 {
4585         if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4586                 return -EINVAL;
4587
4588         if (get_unaligned_le16(&response->header.iu_length) !=
4589                 PQI_GENERAL_ADMIN_IU_LENGTH)
4590                 return -EINVAL;
4591
4592         if (response->function_code != expected_function_code)
4593                 return -EINVAL;
4594
4595         if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4596                 return -EINVAL;
4597
4598         return 0;
4599 }
4600
4601 static int pqi_submit_admin_request_synchronous(
4602         struct pqi_ctrl_info *ctrl_info,
4603         struct pqi_general_admin_request *request,
4604         struct pqi_general_admin_response *response)
4605 {
4606         int rc;
4607
4608         pqi_submit_admin_request(ctrl_info, request);
4609
4610         rc = pqi_poll_for_admin_response(ctrl_info, response);
4611
4612         if (rc == 0)
4613                 rc = pqi_validate_admin_response(response, request->function_code);
4614
4615         return rc;
4616 }
4617
4618 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4619 {
4620         int rc;
4621         struct pqi_general_admin_request request;
4622         struct pqi_general_admin_response response;
4623         struct pqi_device_capability *capability;
4624         struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4625
4626         capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4627         if (!capability)
4628                 return -ENOMEM;
4629
4630         memset(&request, 0, sizeof(request));
4631
4632         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4633         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4634                 &request.header.iu_length);
4635         request.function_code =
4636                 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4637         put_unaligned_le32(sizeof(*capability),
4638                 &request.data.report_device_capability.buffer_length);
4639
4640         rc = pqi_map_single(ctrl_info->pci_dev,
4641                 &request.data.report_device_capability.sg_descriptor,
4642                 capability, sizeof(*capability),
4643                 DMA_FROM_DEVICE);
4644         if (rc)
4645                 goto out;
4646
4647         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4648
4649         pqi_pci_unmap(ctrl_info->pci_dev,
4650                 &request.data.report_device_capability.sg_descriptor, 1,
4651                 DMA_FROM_DEVICE);
4652
4653         if (rc)
4654                 goto out;
4655
4656         if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4657                 rc = -EIO;
4658                 goto out;
4659         }
4660
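             /*
              * Cache the controller's limits.  The capability structure
              * reports queue element lengths in 16-byte units, so convert
              * them to bytes here.
              */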
4661         ctrl_info->max_inbound_queues =
4662                 get_unaligned_le16(&capability->max_inbound_queues);
4663         ctrl_info->max_elements_per_iq =
4664                 get_unaligned_le16(&capability->max_elements_per_iq);
4665         ctrl_info->max_iq_element_length =
4666                 get_unaligned_le16(&capability->max_iq_element_length)
4667                 * 16;
4668         ctrl_info->max_outbound_queues =
4669                 get_unaligned_le16(&capability->max_outbound_queues);
4670         ctrl_info->max_elements_per_oq =
4671                 get_unaligned_le16(&capability->max_elements_per_oq);
4672         ctrl_info->max_oq_element_length =
4673                 get_unaligned_le16(&capability->max_oq_element_length)
4674                 * 16;
4675
4676         sop_iu_layer_descriptor =
4677                 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4678
4679         ctrl_info->max_inbound_iu_length_per_firmware =
4680                 get_unaligned_le16(
4681                         &sop_iu_layer_descriptor->max_inbound_iu_length);
4682         ctrl_info->inbound_spanning_supported =
4683                 sop_iu_layer_descriptor->inbound_spanning_supported;
4684         ctrl_info->outbound_spanning_supported =
4685                 sop_iu_layer_descriptor->outbound_spanning_supported;
4686
4687 out:
4688         kfree(capability);
4689
4690         return rc;
4691 }
4692
4693 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4694 {
4695         if (ctrl_info->max_iq_element_length <
4696                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4697                 dev_err(&ctrl_info->pci_dev->dev,
4698                         "max. inbound queue element length of %d is less than the required length of %d\n",
4699                         ctrl_info->max_iq_element_length,
4700                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4701                 return -EINVAL;
4702         }
4703
4704         if (ctrl_info->max_oq_element_length <
4705                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4706                 dev_err(&ctrl_info->pci_dev->dev,
4707                         "max. outbound queue element length of %d is less than the required length of %d\n",
4708                         ctrl_info->max_oq_element_length,
4709                         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4710                 return -EINVAL;
4711         }
4712
4713         if (ctrl_info->max_inbound_iu_length_per_firmware <
4714                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4715                 dev_err(&ctrl_info->pci_dev->dev,
4716                         "max. inbound IU length of %u is less than the min. required length of %d\n",
4717                         ctrl_info->max_inbound_iu_length_per_firmware,
4718                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4719                 return -EINVAL;
4720         }
4721
4722         if (!ctrl_info->inbound_spanning_supported) {
4723                 dev_err(&ctrl_info->pci_dev->dev,
4724                         "the controller does not support inbound spanning\n");
4725                 return -EINVAL;
4726         }
4727
4728         if (ctrl_info->outbound_spanning_supported) {
4729                 dev_err(&ctrl_info->pci_dev->dev,
4730                         "the controller supports outbound spanning but this driver does not\n");
4731                 return -EINVAL;
4732         }
4733
4734         return 0;
4735 }
4736
4737 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4738 {
4739         int rc;
4740         struct pqi_event_queue *event_queue;
4741         struct pqi_general_admin_request request;
4742         struct pqi_general_admin_response response;
4743
4744         event_queue = &ctrl_info->event_queue;
4745
4746         /*
4747          * Create an OQ (Outbound Queue - device to host queue)
4748          * dedicated to events.
4749          */
4750         memset(&request, 0, sizeof(request));
4751         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4752         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4753                 &request.header.iu_length);
4754         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4755         put_unaligned_le16(event_queue->oq_id,
4756                 &request.data.create_operational_oq.queue_id);
4757         put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4758                 &request.data.create_operational_oq.element_array_addr);
4759         put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4760                 &request.data.create_operational_oq.pi_addr);
4761         put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4762                 &request.data.create_operational_oq.num_elements);
4763         put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4764                 &request.data.create_operational_oq.element_length);
4765         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4766         put_unaligned_le16(event_queue->int_msg_num,
4767                 &request.data.create_operational_oq.int_msg_num);
4768
4769         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4770                 &response);
4771         if (rc)
4772                 return rc;
4773
4774         event_queue->oq_ci = ctrl_info->iomem_base +
4775                 PQI_DEVICE_REGISTERS_OFFSET +
4776                 get_unaligned_le64(
4777                         &response.data.create_operational_oq.oq_ci_offset);
4778
4779         return 0;
4780 }
4781
4782 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4783         unsigned int group_number)
4784 {
4785         int rc;
4786         struct pqi_queue_group *queue_group;
4787         struct pqi_general_admin_request request;
4788         struct pqi_general_admin_response response;
4789
4790         queue_group = &ctrl_info->queue_groups[group_number];
4791
4792         /*
4793          * Create IQ (Inbound Queue - host to device queue) for
4794          * RAID path.
4795          */
4796         memset(&request, 0, sizeof(request));
4797         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4798         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4799                 &request.header.iu_length);
4800         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4801         put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4802                 &request.data.create_operational_iq.queue_id);
4803         put_unaligned_le64(
4804                 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4805                 &request.data.create_operational_iq.element_array_addr);
4806         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4807                 &request.data.create_operational_iq.ci_addr);
4808         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4809                 &request.data.create_operational_iq.num_elements);
4810         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4811                 &request.data.create_operational_iq.element_length);
4812         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4813
4814         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4815                 &response);
4816         if (rc) {
4817                 dev_err(&ctrl_info->pci_dev->dev,
4818                         "error creating inbound RAID queue\n");
4819                 return rc;
4820         }
4821
4822         queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4823                 PQI_DEVICE_REGISTERS_OFFSET +
4824                 get_unaligned_le64(
4825                         &response.data.create_operational_iq.iq_pi_offset);
4826
4827         /*
4828          * Create IQ (Inbound Queue - host to device queue) for
4829          * Advanced I/O (AIO) path.
4830          */
4831         memset(&request, 0, sizeof(request));
4832         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4833         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4834                 &request.header.iu_length);
4835         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4836         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4837                 &request.data.create_operational_iq.queue_id);
4838         put_unaligned_le64(
4839                 (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
4840                 &request.data.create_operational_iq.element_array_addr);
4841         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4842                 &request.data.create_operational_iq.ci_addr);
4843         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4844                 &request.data.create_operational_iq.num_elements);
4845         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4846                 &request.data.create_operational_iq.element_length);
4847         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4848
4849         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4850                 &response);
4851         if (rc) {
4852                 dev_err(&ctrl_info->pci_dev->dev,
4853                         "error creating inbound AIO queue\n");
4854                 return rc;
4855         }
4856
4857         queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4858                 PQI_DEVICE_REGISTERS_OFFSET +
4859                 get_unaligned_le64(
4860                         &response.data.create_operational_iq.iq_pi_offset);
4861
4862         /*
4863          * Designate the 2nd IQ as the AIO path.  By default, all IQs are
4864          * assumed to be for RAID path I/O unless we change the queue's
4865          * property.
4866          */
4867         memset(&request, 0, sizeof(request));
4868         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4869         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4870                 &request.header.iu_length);
4871         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4872         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4873                 &request.data.change_operational_iq_properties.queue_id);
4874         put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4875                 &request.data.change_operational_iq_properties.vendor_specific);
4876
4877         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4878                 &response);
4879         if (rc) {
4880                 dev_err(&ctrl_info->pci_dev->dev,
4881                         "error changing queue property\n");
4882                 return rc;
4883         }
4884
4885         /*
4886          * Create OQ (Outbound Queue - device to host queue).
4887          */
4888         memset(&request, 0, sizeof(request));
4889         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4890         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4891                 &request.header.iu_length);
4892         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4893         put_unaligned_le16(queue_group->oq_id,
4894                 &request.data.create_operational_oq.queue_id);
4895         put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4896                 &request.data.create_operational_oq.element_array_addr);
4897         put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4898                 &request.data.create_operational_oq.pi_addr);
4899         put_unaligned_le16(ctrl_info->num_elements_per_oq,
4900                 &request.data.create_operational_oq.num_elements);
4901         put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4902                 &request.data.create_operational_oq.element_length);
4903         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4904         put_unaligned_le16(queue_group->int_msg_num,
4905                 &request.data.create_operational_oq.int_msg_num);
4906
4907         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4908                 &response);
4909         if (rc) {
4910                 dev_err(&ctrl_info->pci_dev->dev,
4911                         "error creating outbound queue\n");
4912                 return rc;
4913         }
4914
4915         queue_group->oq_ci = ctrl_info->iomem_base +
4916                 PQI_DEVICE_REGISTERS_OFFSET +
4917                 get_unaligned_le64(
4918                         &response.data.create_operational_oq.oq_ci_offset);
4919
4920         return 0;
4921 }
4922
4923 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4924 {
4925         int rc;
4926         unsigned int i;
4927
4928         rc = pqi_create_event_queue(ctrl_info);
4929         if (rc) {
4930                 dev_err(&ctrl_info->pci_dev->dev,
4931                         "error creating event queue\n");
4932                 return rc;
4933         }
4934
4935         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4936                 rc = pqi_create_queue_group(ctrl_info, i);
4937                 if (rc) {
4938                         dev_err(&ctrl_info->pci_dev->dev,
4939                                 "error creating queue group number %u/%u\n",
4940                                 i, ctrl_info->num_queue_groups);
4941                         return rc;
4942                 }
4943         }
4944
4945         return 0;
4946 }
4947
4948 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
4949         struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
4950
4951 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4952         bool enable_events)
4953 {
4954         int rc;
4955         unsigned int i;
4956         struct pqi_event_config *event_config;
4957         struct pqi_event_descriptor *event_descriptor;
4958         struct pqi_general_management_request request;
4959
4960         event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4961                 GFP_KERNEL);
4962         if (!event_config)
4963                 return -ENOMEM;
4964
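             /*
              * Read the current event configuration from the controller,
              * route each supported event type to the event queue (or to no
              * queue when disabling events), then write the updated
              * configuration back to the controller.
              */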
4965         memset(&request, 0, sizeof(request));
4966
4967         request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4968         put_unaligned_le16(offsetof(struct pqi_general_management_request,
4969                 data.report_event_configuration.sg_descriptors[1]) -
4970                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4971         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4972                 &request.data.report_event_configuration.buffer_length);
4973
4974         rc = pqi_map_single(ctrl_info->pci_dev,
4975                 request.data.report_event_configuration.sg_descriptors,
4976                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4977                 DMA_FROM_DEVICE);
4978         if (rc)
4979                 goto out;
4980
4981         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
4982
4983         pqi_pci_unmap(ctrl_info->pci_dev,
4984                 request.data.report_event_configuration.sg_descriptors, 1,
4985                 DMA_FROM_DEVICE);
4986
4987         if (rc)
4988                 goto out;
4989
4990         for (i = 0; i < event_config->num_event_descriptors; i++) {
4991                 event_descriptor = &event_config->descriptors[i];
4992                 if (enable_events &&
4993                         pqi_is_supported_event(event_descriptor->event_type))
4994                                 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4995                                         &event_descriptor->oq_id);
4996                 else
4997                         put_unaligned_le16(0, &event_descriptor->oq_id);
4998         }
4999
5000         memset(&request, 0, sizeof(request));
5001
5002         request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5003         put_unaligned_le16(offsetof(struct pqi_general_management_request,
5004                 data.report_event_configuration.sg_descriptors[1]) -
5005                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5006         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5007                 &request.data.report_event_configuration.buffer_length);
5008
5009         rc = pqi_map_single(ctrl_info->pci_dev,
5010                 request.data.report_event_configuration.sg_descriptors,
5011                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5012                 DMA_TO_DEVICE);
5013         if (rc)
5014                 goto out;
5015
5016         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5017
5018         pqi_pci_unmap(ctrl_info->pci_dev,
5019                 request.data.report_event_configuration.sg_descriptors, 1,
5020                 DMA_TO_DEVICE);
5021
5022 out:
5023         kfree(event_config);
5024
5025         return rc;
5026 }
5027
5028 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5029 {
5030         return pqi_configure_events(ctrl_info, true);
5031 }
5032
5033 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5034 {
5035         unsigned int i;
5036         struct device *dev;
5037         size_t sg_chain_buffer_length;
5038         struct pqi_io_request *io_request;
5039
5040         if (!ctrl_info->io_request_pool)
5041                 return;
5042
5043         dev = &ctrl_info->pci_dev->dev;
5044         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5045         io_request = ctrl_info->io_request_pool;
5046
5047         for (i = 0; i < ctrl_info->max_io_slots; i++) {
5048                 kfree(io_request->iu);
5049                 if (!io_request->sg_chain_buffer)
5050                         break;
5051                 dma_free_coherent(dev, sg_chain_buffer_length,
5052                         io_request->sg_chain_buffer,
5053                         io_request->sg_chain_buffer_dma_handle);
5054                 io_request++;
5055         }
5056
5057         kfree(ctrl_info->io_request_pool);
5058         ctrl_info->io_request_pool = NULL;
5059 }
5060
5061 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5062 {
5063         ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5064                                      ctrl_info->error_buffer_length,
5065                                      &ctrl_info->error_buffer_dma_handle,
5066                                      GFP_KERNEL);
5067         if (!ctrl_info->error_buffer)
5068                 return -ENOMEM;
5069
5070         return 0;
5071 }
5072
5073 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5074 {
5075         unsigned int i;
5076         void *sg_chain_buffer;
5077         size_t sg_chain_buffer_length;
5078         dma_addr_t sg_chain_buffer_dma_handle;
5079         struct device *dev;
5080         struct pqi_io_request *io_request;
5081
5082         ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5083                 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5084
5085         if (!ctrl_info->io_request_pool) {
5086                 dev_err(&ctrl_info->pci_dev->dev,
5087                         "failed to allocate I/O request pool\n");
5088                 goto error;
5089         }
5090
5091         dev = &ctrl_info->pci_dev->dev;
5092         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5093         io_request = ctrl_info->io_request_pool;
5094
5095         for (i = 0; i < ctrl_info->max_io_slots; i++) {
5096                 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5097
5098                 if (!io_request->iu) {
5099                         dev_err(&ctrl_info->pci_dev->dev,
5100                                 "failed to allocate IU buffers\n");
5101                         goto error;
5102                 }
5103
5104                 sg_chain_buffer = dma_alloc_coherent(dev,
5105                         sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5106                         GFP_KERNEL);
5107
5108                 if (!sg_chain_buffer) {
5109                         dev_err(&ctrl_info->pci_dev->dev,
5110                                 "failed to allocate PQI scatter-gather chain buffers\n");
5111                         goto error;
5112                 }
5113
5114                 io_request->index = i;
5115                 io_request->sg_chain_buffer = sg_chain_buffer;
5116                 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5117                 io_request++;
5118         }
5119
5120         return 0;
5121
5122 error:
5123         pqi_free_all_io_requests(ctrl_info);
5124
5125         return -ENOMEM;
5126 }
5127
5128 /*
5129  * Calculate required resources that are sized based on max. outstanding
5130  * requests and max. transfer size.
5131  */
5132
5133 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5134 {
5135         u32 max_transfer_size;
5136         u32 max_sg_entries;
5137
5138         ctrl_info->scsi_ml_can_queue =
5139                 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5140         ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5141
5142         ctrl_info->error_buffer_length =
5143                 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5144
5145         if (reset_devices)
5146                 max_transfer_size = min(ctrl_info->max_transfer_size,
5147                         PQI_MAX_TRANSFER_SIZE_KDUMP);
5148         else
5149                 max_transfer_size = min(ctrl_info->max_transfer_size,
5150                         PQI_MAX_TRANSFER_SIZE);
5151
5152         max_sg_entries = max_transfer_size / PAGE_SIZE;
5153
5154         /* +1 to cover when the buffer is not page-aligned. */
5155         max_sg_entries++;
5156
5157         max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5158
5159         max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5160
5161         ctrl_info->sg_chain_buffer_length =
5162                 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5163                 PQI_EXTRA_SGL_MEMORY;
5164         ctrl_info->sg_tablesize = max_sg_entries;
5165         ctrl_info->max_sectors = max_transfer_size / 512;
5166 }
5167
5168 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5169 {
5170         int num_queue_groups;
5171         u16 num_elements_per_iq;
5172         u16 num_elements_per_oq;
5173
5174         if (reset_devices) {
5175                 num_queue_groups = 1;
5176         } else {
5177                 int num_cpus;
5178                 int max_queue_groups;
5179
5180                 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5181                         ctrl_info->max_outbound_queues - 1);
5182                 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5183
5184                 num_cpus = num_online_cpus();
5185                 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5186                 num_queue_groups = min(num_queue_groups, max_queue_groups);
5187         }
5188
5189         ctrl_info->num_queue_groups = num_queue_groups;
5190         ctrl_info->max_hw_queue_index = num_queue_groups - 1;
5191
5192         /*
5193          * Make sure that the max. inbound IU length is an even multiple
5194          * of our inbound element length.
5195          */
5196         ctrl_info->max_inbound_iu_length =
5197                 (ctrl_info->max_inbound_iu_length_per_firmware /
5198                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5199                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5200
5201         num_elements_per_iq =
5202                 (ctrl_info->max_inbound_iu_length /
5203                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5204
5205         /* Add one because one element in each queue is unusable. */
5206         num_elements_per_iq++;
5207
5208         num_elements_per_iq = min(num_elements_per_iq,
5209                 ctrl_info->max_elements_per_iq);
5210
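             /*
              * A queue group's single OQ receives completions from both of
              * its IQs (RAID and AIO paths), so size it to hold roughly twice
              * as many usable elements as an IQ.
              */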
5211         num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5212         num_elements_per_oq = min(num_elements_per_oq,
5213                 ctrl_info->max_elements_per_oq);
5214
5215         ctrl_info->num_elements_per_iq = num_elements_per_iq;
5216         ctrl_info->num_elements_per_oq = num_elements_per_oq;
5217
5218         ctrl_info->max_sg_per_iu =
5219                 ((ctrl_info->max_inbound_iu_length -
5220                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5221                 sizeof(struct pqi_sg_descriptor)) +
5222                 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5223
5224         ctrl_info->max_sg_per_r56_iu =
5225                 ((ctrl_info->max_inbound_iu_length -
5226                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5227                 sizeof(struct pqi_sg_descriptor)) +
5228                 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5229 }
5230
5231 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5232         struct scatterlist *sg)
5233 {
5234         u64 address = (u64)sg_dma_address(sg);
5235         unsigned int length = sg_dma_len(sg);
5236
5237         put_unaligned_le64(address, &sg_descriptor->address);
5238         put_unaligned_le32(length, &sg_descriptor->length);
5239         put_unaligned_le32(0, &sg_descriptor->flags);
5240 }
5241
5242 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5243         struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5244         int max_sg_per_iu, bool *chained)
5245 {
5246         int i;
5247         unsigned int num_sg_in_iu;
5248
5249         *chained = false;
5250         i = 0;
5251         num_sg_in_iu = 0;
5252         max_sg_per_iu--;        /* Subtract 1 to leave room for chain marker. */
5253
5254         while (1) {
5255                 pqi_set_sg_descriptor(sg_descriptor, sg);
5256                 if (!*chained)
5257                         num_sg_in_iu++;
5258                 i++;
5259                 if (i == sg_count)
5260                         break;
5261                 sg_descriptor++;
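                     /*
                      * If the IU's embedded descriptor area is full, emit a
                      * chain descriptor that points to the preallocated chain
                      * buffer and continue filling descriptors there.
                      */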
5262                 if (i == max_sg_per_iu) {
5263                         put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5264                                 &sg_descriptor->address);
5265                         put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5266                                 &sg_descriptor->length);
5267                         put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5268                         *chained = true;
5269                         num_sg_in_iu++;
5270                         sg_descriptor = io_request->sg_chain_buffer;
5271                 }
5272                 sg = sg_next(sg);
5273         }
5274
5275         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5276
5277         return num_sg_in_iu;
5278 }
5279
5280 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5281         struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5282         struct pqi_io_request *io_request)
5283 {
5284         u16 iu_length;
5285         int sg_count;
5286         bool chained;
5287         unsigned int num_sg_in_iu;
5288         struct scatterlist *sg;
5289         struct pqi_sg_descriptor *sg_descriptor;
5290
5291         sg_count = scsi_dma_map(scmd);
5292         if (sg_count < 0)
5293                 return sg_count;
5294
5295         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5296                 PQI_REQUEST_HEADER_LENGTH;
5297
5298         if (sg_count == 0)
5299                 goto out;
5300
5301         sg = scsi_sglist(scmd);
5302         sg_descriptor = request->sg_descriptors;
5303
5304         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5305                 ctrl_info->max_sg_per_iu, &chained);
5306
5307         request->partial = chained;
5308         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5309
5310 out:
5311         put_unaligned_le16(iu_length, &request->header.iu_length);
5312
5313         return 0;
5314 }
5315
5316 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5317         struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5318         struct pqi_io_request *io_request)
5319 {
5320         u16 iu_length;
5321         int sg_count;
5322         bool chained;
5323         unsigned int num_sg_in_iu;
5324         struct scatterlist *sg;
5325         struct pqi_sg_descriptor *sg_descriptor;
5326
5327         sg_count = scsi_dma_map(scmd);
5328         if (sg_count < 0)
5329                 return sg_count;
5330
5331         iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5332                 PQI_REQUEST_HEADER_LENGTH;
5333         num_sg_in_iu = 0;
5334
5335         if (sg_count == 0)
5336                 goto out;
5337
5338         sg = scsi_sglist(scmd);
5339         sg_descriptor = request->sg_descriptors;
5340
5341         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5342                 ctrl_info->max_sg_per_iu, &chained);
5343
5344         request->partial = chained;
5345         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5346
5347 out:
5348         put_unaligned_le16(iu_length, &request->header.iu_length);
5349         request->num_sg_descriptors = num_sg_in_iu;
5350
5351         return 0;
5352 }
5353
5354 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5355         struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5356         struct pqi_io_request *io_request)
5357 {
5358         u16 iu_length;
5359         int sg_count;
5360         bool chained;
5361         unsigned int num_sg_in_iu;
5362         struct scatterlist *sg;
5363         struct pqi_sg_descriptor *sg_descriptor;
5364
5365         sg_count = scsi_dma_map(scmd);
5366         if (sg_count < 0)
5367                 return sg_count;
5368
5369         iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5370                 PQI_REQUEST_HEADER_LENGTH;
5371         num_sg_in_iu = 0;
5372
5373         if (sg_count != 0) {
5374                 sg = scsi_sglist(scmd);
5375                 sg_descriptor = request->sg_descriptors;
5376
5377                 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5378                         ctrl_info->max_sg_per_r56_iu, &chained);
5379
5380                 request->partial = chained;
5381                 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5382         }
5383
5384         put_unaligned_le16(iu_length, &request->header.iu_length);
5385         request->num_sg_descriptors = num_sg_in_iu;
5386
5387         return 0;
5388 }
5389
5390 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5391         struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5392         struct pqi_io_request *io_request)
5393 {
5394         u16 iu_length;
5395         int sg_count;
5396         bool chained;
5397         unsigned int num_sg_in_iu;
5398         struct scatterlist *sg;
5399         struct pqi_sg_descriptor *sg_descriptor;
5400
5401         sg_count = scsi_dma_map(scmd);
5402         if (sg_count < 0)
5403                 return sg_count;
5404
5405         iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5406                 PQI_REQUEST_HEADER_LENGTH;
5407         num_sg_in_iu = 0;
5408
5409         if (sg_count == 0)
5410                 goto out;
5411
5412         sg = scsi_sglist(scmd);
5413         sg_descriptor = request->sg_descriptors;
5414
5415         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5416                 ctrl_info->max_sg_per_iu, &chained);
5417
5418         request->partial = chained;
5419         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5420
5421 out:
5422         put_unaligned_le16(iu_length, &request->header.iu_length);
5423         request->num_sg_descriptors = num_sg_in_iu;
5424
5425         return 0;
5426 }
5427
5428 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5429         void *context)
5430 {
5431         struct scsi_cmnd *scmd;
5432
5433         scmd = io_request->scmd;
5434         pqi_free_io_request(io_request);
5435         scsi_dma_unmap(scmd);
5436         pqi_scsi_done(scmd);
5437 }
5438
5439 static int pqi_raid_submit_scsi_cmd_with_io_request(
5440         struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
5441         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5442         struct pqi_queue_group *queue_group)
5443 {
5444         int rc;
5445         size_t cdb_length;
5446         struct pqi_raid_path_request *request;
5447
5448         io_request->io_complete_callback = pqi_raid_io_complete;
5449         io_request->scmd = scmd;
5450
5451         request = io_request->iu;
5452         memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5453
5454         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5455         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5456         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5457         put_unaligned_le16(io_request->index, &request->request_id);
5458         request->error_index = request->request_id;
5459         memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5460
5461         cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5462         memcpy(request->cdb, scmd->cmnd, cdb_length);
5463
5464         switch (cdb_length) {
5465         case 6:
5466         case 10:
5467         case 12:
5468         case 16:
5469                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5470                 break;
5471         case 20:
5472                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5473                 break;
5474         case 24:
5475                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5476                 break;
5477         case 28:
5478                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5479                 break;
5480         case 32:
5481         default:
5482                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5483                 break;
5484         }
5485
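             /*
              * The data direction is expressed from the controller's point of
              * view: for a host write (DMA_TO_DEVICE) the controller reads
              * the host buffer, and for a host read (DMA_FROM_DEVICE) it
              * writes to it.
              */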
5486         switch (scmd->sc_data_direction) {
5487         case DMA_TO_DEVICE:
5488                 request->data_direction = SOP_READ_FLAG;
5489                 break;
5490         case DMA_FROM_DEVICE:
5491                 request->data_direction = SOP_WRITE_FLAG;
5492                 break;
5493         case DMA_NONE:
5494                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5495                 break;
5496         case DMA_BIDIRECTIONAL:
5497                 request->data_direction = SOP_BIDIRECTIONAL;
5498                 break;
5499         default:
5500                 dev_err(&ctrl_info->pci_dev->dev,
5501                         "unknown data direction: %d\n",
5502                         scmd->sc_data_direction);
5503                 break;
5504         }
5505
5506         rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5507         if (rc) {
5508                 pqi_free_io_request(io_request);
5509                 return SCSI_MLQUEUE_HOST_BUSY;
5510         }
5511
5512         pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5513
5514         return 0;
5515 }
5516
5517 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5518         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5519         struct pqi_queue_group *queue_group)
5520 {
5521         struct pqi_io_request *io_request;
5522
5523         io_request = pqi_alloc_io_request(ctrl_info);
5524
5525         return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5526                 device, scmd, queue_group);
5527 }
5528
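/*
 * Decide whether a failed RAID-bypass (AIO) request should be retried
 * through the normal RAID path rather than completed with an error.
 */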
5529 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5530 {
5531         struct scsi_cmnd *scmd;
5532         struct pqi_scsi_dev *device;
5533         struct pqi_ctrl_info *ctrl_info;
5534
5535         if (!io_request->raid_bypass)
5536                 return false;
5537
5538         scmd = io_request->scmd;
5539         if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5540                 return false;
5541         if (host_byte(scmd->result) == DID_NO_CONNECT)
5542                 return false;
5543
5544         device = scmd->device->hostdata;
5545         if (pqi_device_offline(device) || pqi_device_in_remove(device))
5546                 return false;
5547
5548         ctrl_info = shost_to_hba(scmd->device->host);
5549         if (pqi_ctrl_offline(ctrl_info))
5550                 return false;
5551
5552         return true;
5553 }
5554
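/*
 * AIO completion handler.  If the request completed with -EAGAIN, or it was
 * a RAID-bypass request whose failure qualifies for a retry, set
 * DID_IMM_RETRY so the SML redrives the command; incrementing this_residual
 * keeps the retry off the bypass path.
 */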
5555 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5556         void *context)
5557 {
5558         struct scsi_cmnd *scmd;
5559
5560         scmd = io_request->scmd;
5561         scsi_dma_unmap(scmd);
5562         if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5563                 set_host_byte(scmd, DID_IMM_RETRY);
5564                 pqi_cmd_priv(scmd)->this_residual++;
5565         }
5566
5567         pqi_free_io_request(io_request);
5568         pqi_scsi_done(scmd);
5569 }
5570
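/*
 * Treat a read/write command as high priority (NCQ priority) when the
 * device has ncq_prio_enable set and the request's I/O priority class is
 * IOPRIO_CLASS_RT.
 */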
5571 static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
5572         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
5573 {
5574         bool io_high_prio;
5575         int priority_class;
5576
5577         io_high_prio = false;
5578
5579         if (device->ncq_prio_enable) {
5580                 priority_class =
5581                         IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
5582                 if (priority_class == IOPRIO_CLASS_RT) {
5583                         /* Set NCQ priority for read/write commands. */
5584                         switch (scmd->cmnd[0]) {
5585                         case WRITE_16:
5586                         case READ_16:
5587                         case WRITE_12:
5588                         case READ_12:
5589                         case WRITE_10:
5590                         case READ_10:
5591                         case WRITE_6:
5592                         case READ_6:
5593                                 io_high_prio = true;
5594                                 break;
5595                         }
5596                 }
5597         }
5598
5599         return io_high_prio;
5600 }
5601
5602 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5603         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5604         struct pqi_queue_group *queue_group)
5605 {
5606         bool io_high_prio;
5607
5608         io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
5609
5610         return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5611                 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5612                 false, io_high_prio);
5613 }
5614
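/*
 * Build an AIO path request IU that addresses the device directly via its
 * AIO handle (nexus ID), optionally carrying encryption parameters, and
 * post it to the AIO path of the selected queue group.
 */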
5615 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5616         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5617         unsigned int cdb_length, struct pqi_queue_group *queue_group,
5618         struct pqi_encryption_info *encryption_info, bool raid_bypass,
5619         bool io_high_prio)
5620 {
5621         int rc;
5622         struct pqi_io_request *io_request;
5623         struct pqi_aio_path_request *request;
5624
5625         io_request = pqi_alloc_io_request(ctrl_info);
5626         io_request->io_complete_callback = pqi_aio_io_complete;
5627         io_request->scmd = scmd;
5628         io_request->raid_bypass = raid_bypass;
5629
5630         request = io_request->iu;
5631         memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5632
5633         request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5634         put_unaligned_le32(aio_handle, &request->nexus_id);
5635         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5636         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5637         request->command_priority = io_high_prio;
5638         put_unaligned_le16(io_request->index, &request->request_id);
5639         request->error_index = request->request_id;
5640         if (cdb_length > sizeof(request->cdb))
5641                 cdb_length = sizeof(request->cdb);
5642         request->cdb_length = cdb_length;
5643         memcpy(request->cdb, cdb, cdb_length);
5644
5645         switch (scmd->sc_data_direction) {
5646         case DMA_TO_DEVICE:
5647                 request->data_direction = SOP_READ_FLAG;
5648                 break;
5649         case DMA_FROM_DEVICE:
5650                 request->data_direction = SOP_WRITE_FLAG;
5651                 break;
5652         case DMA_NONE:
5653                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5654                 break;
5655         case DMA_BIDIRECTIONAL:
5656                 request->data_direction = SOP_BIDIRECTIONAL;
5657                 break;
5658         default:
5659                 dev_err(&ctrl_info->pci_dev->dev,
5660                         "unknown data direction: %d\n",
5661                         scmd->sc_data_direction);
5662                 break;
5663         }
5664
5665         if (encryption_info) {
5666                 request->encryption_enable = true;
5667                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5668                         &request->data_encryption_key_index);
5669                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5670                         &request->encrypt_tweak_lower);
5671                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5672                         &request->encrypt_tweak_upper);
5673         }
5674
5675         rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5676         if (rc) {
5677                 pqi_free_io_request(io_request);
5678                 return SCSI_MLQUEUE_HOST_BUSY;
5679         }
5680
5681         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5682
5683         return 0;
5684 }
5685
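/*
 * Submit a RAID 1 write as an AIO bypass request.  The IU carries an IT
 * nexus for each mirror copy (up to three), allowing the controller to
 * write every copy itself.
 */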
5686 static  int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5687         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5688         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5689         struct pqi_scsi_dev_raid_map_data *rmd)
5690 {
5691         int rc;
5692         struct pqi_io_request *io_request;
5693         struct pqi_aio_r1_path_request *r1_request;
5694
5695         io_request = pqi_alloc_io_request(ctrl_info);
5696         io_request->io_complete_callback = pqi_aio_io_complete;
5697         io_request->scmd = scmd;
5698         io_request->raid_bypass = true;
5699
5700         r1_request = io_request->iu;
5701         memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5702
5703         r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5704         put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5705         r1_request->num_drives = rmd->num_it_nexus_entries;
5706         put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5707         put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5708         if (rmd->num_it_nexus_entries == 3)
5709                 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5710
5711         put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5712         r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5713         put_unaligned_le16(io_request->index, &r1_request->request_id);
5714         r1_request->error_index = r1_request->request_id;
5715         if (rmd->cdb_length > sizeof(r1_request->cdb))
5716                 rmd->cdb_length = sizeof(r1_request->cdb);
5717         r1_request->cdb_length = rmd->cdb_length;
5718         memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5719
5720         /* The direction is always write: the controller reads the data from host memory (SOP_READ_FLAG). */
5721         r1_request->data_direction = SOP_READ_FLAG;
5722
5723         if (encryption_info) {
5724                 r1_request->encryption_enable = true;
5725                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5726                                 &r1_request->data_encryption_key_index);
5727                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5728                                 &r1_request->encrypt_tweak_lower);
5729                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5730                                 &r1_request->encrypt_tweak_upper);
5731         }
5732
5733         rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5734         if (rc) {
5735                 pqi_free_io_request(io_request);
5736                 return SCSI_MLQUEUE_HOST_BUSY;
5737         }
5738
5739         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5740
5741         return 0;
5742 }
5743
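/*
 * Submit a RAID 5/6 write as an AIO bypass request.  The IU identifies the
 * data and P parity drives by IT nexus (plus the Q parity nexus and XOR
 * multiplier for RAID 6) and carries the stripe row.
 */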
5744 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5745         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5746         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5747         struct pqi_scsi_dev_raid_map_data *rmd)
5748 {
5749         int rc;
5750         struct pqi_io_request *io_request;
5751         struct pqi_aio_r56_path_request *r56_request;
5752
5753         io_request = pqi_alloc_io_request(ctrl_info);
5754         io_request->io_complete_callback = pqi_aio_io_complete;
5755         io_request->scmd = scmd;
5756         io_request->raid_bypass = true;
5757
5758         r56_request = io_request->iu;
5759         memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5760
5761         if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5762                 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5763         else
5764                 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5765
5766         put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5767         put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5768         put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5769         if (rmd->raid_level == SA_RAID_6) {
5770                 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5771                 r56_request->xor_multiplier = rmd->xor_mult;
5772         }
5773         put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5774         r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5775         put_unaligned_le64(rmd->row, &r56_request->row);
5776
5777         put_unaligned_le16(io_request->index, &r56_request->request_id);
5778         r56_request->error_index = r56_request->request_id;
5779
5780         if (rmd->cdb_length > sizeof(r56_request->cdb))
5781                 rmd->cdb_length = sizeof(r56_request->cdb);
5782         r56_request->cdb_length = rmd->cdb_length;
5783         memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5784
5785         /* The direction is always write. */
5786         r56_request->data_direction = SOP_READ_FLAG;
5787
5788         if (encryption_info) {
5789                 r56_request->encryption_enable = true;
5790                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5791                                 &r56_request->data_encryption_key_index);
5792                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5793                                 &r56_request->encrypt_tweak_lower);
5794                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5795                                 &r56_request->encrypt_tweak_upper);
5796         }
5797
5798         rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5799         if (rc) {
5800                 pqi_free_io_request(io_request);
5801                 return SCSI_MLQUEUE_HOST_BUSY;
5802         }
5803
5804         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5805
5806         return 0;
5807 }
5808
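/*
 * Map the command's blk-mq hardware queue to a PQI queue group index,
 * falling back to group 0 if the index is out of range.
 */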
5809 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5810         struct scsi_cmnd *scmd)
5811 {
5812         u16 hw_queue;
5813
5814         hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5815         if (hw_queue > ctrl_info->max_hw_queue_index)
5816                 hw_queue = 0;
5817
5818         return hw_queue;
5819 }
5820
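/*
 * Passthrough requests and commands that have already been retried
 * (this_residual != 0) are not eligible for the RAID bypass path.
 */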
5821 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5822 {
5823         if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5824                 return false;
5825
5826         return pqi_cmd_priv(scmd)->this_residual == 0;
5827 }
5828
5829 /*
5830  * This function is called just before the completed SCSI request is
5831  * handed back to the SCSI midlayer (SML).
5832  */
5833
5834 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5835 {
5836         struct pqi_scsi_dev *device;
5837
5838         if (!scmd->device) {
5839                 set_host_byte(scmd, DID_NO_CONNECT);
5840                 return;
5841         }
5842
5843         device = scmd->device->hostdata;
5844         if (!device) {
5845                 set_host_byte(scmd, DID_NO_CONNECT);
5846                 return;
5847         }
5848
5849         atomic_dec(&device->scsi_cmds_outstanding);
5850 }
5851
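/*
 * Detect sequential RAID 5/6 write streams.  A small set of per-LUN stream
 * slots is maintained with an LRU scheme; returning true makes the caller
 * send the write down the RAID path instead of the AIO bypass path.
 */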
5852 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5853         struct scsi_cmnd *scmd)
5854 {
5855         u32 oldest_jiffies;
5856         u8 lru_index;
5857         int i;
5858         int rc;
5859         struct pqi_scsi_dev *device;
5860         struct pqi_stream_data *pqi_stream_data;
5861         struct pqi_scsi_dev_raid_map_data rmd;
5862
5863         if (!ctrl_info->enable_stream_detection)
5864                 return false;
5865
5866         rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5867         if (rc)
5868                 return false;
5869
5870         /* Check writes only. */
5871         if (!rmd.is_write)
5872                 return false;
5873
5874         device = scmd->device->hostdata;
5875
5876         /* Check for RAID 5/6 streams. */
5877         if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5878                 return false;
5879
5880         /*
5881          * If the controller does not support AIO RAID 5/6 writes, the
5882          * request must be sent down the non-AIO (RAID) path.
5883          */
5884         if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5885                 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5886                 return true;
5887
5888         lru_index = 0;
5889         oldest_jiffies = INT_MAX;
5890         for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5891                 pqi_stream_data = &device->stream_data[i];
5892                 /*
5893                  * Check whether this request is adjacent to, or falls
5894                  * within, the previous request in this stream.
5895                  */
5896                 if ((pqi_stream_data->next_lba &&
5897                         rmd.first_block >= pqi_stream_data->next_lba) &&
5898                         rmd.first_block <= pqi_stream_data->next_lba +
5899                                 rmd.block_cnt) {
5900                         pqi_stream_data->next_lba = rmd.first_block +
5901                                 rmd.block_cnt;
5902                         pqi_stream_data->last_accessed = jiffies;
5903                         return true;
5904                 }
5905
5906                 /* An unused entry can be claimed immediately. */
5907                 if (pqi_stream_data->last_accessed == 0) {
5908                         lru_index = i;
5909                         break;
5910                 }
5911
5912                 /* Find entry with oldest last accessed time. */
5913                 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5914                         oldest_jiffies = pqi_stream_data->last_accessed;
5915                         lru_index = i;
5916                 }
5917         }
5918
5919         /* Set LRU entry. */
5920         pqi_stream_data = &device->stream_data[lru_index];
5921         pqi_stream_data->last_accessed = jiffies;
5922         pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5923
5924         return false;
5925 }
5926
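/*
 * queuecommand entry point: select a queue group from the blk-mq hardware
 * queue, then route the command down the RAID bypass, AIO, or RAID path
 * depending on the device type and eligibility checks.
 */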
5927 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5928 {
5929         int rc;
5930         struct pqi_ctrl_info *ctrl_info;
5931         struct pqi_scsi_dev *device;
5932         u16 hw_queue;
5933         struct pqi_queue_group *queue_group;
5934         bool raid_bypassed;
5935
5936         device = scmd->device->hostdata;
5937
5938         if (!device) {
5939                 set_host_byte(scmd, DID_NO_CONNECT);
5940                 pqi_scsi_done(scmd);
5941                 return 0;
5942         }
5943
5944         atomic_inc(&device->scsi_cmds_outstanding);
5945
5946         ctrl_info = shost_to_hba(shost);
5947
5948         if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5949                 set_host_byte(scmd, DID_NO_CONNECT);
5950                 pqi_scsi_done(scmd);
5951                 return 0;
5952         }
5953
5954         if (pqi_ctrl_blocked(ctrl_info)) {
5955                 rc = SCSI_MLQUEUE_HOST_BUSY;
5956                 goto out;
5957         }
5958
5959         /*
5960          * Clearing scmd->result is necessary because the SML does not zero
5961          * out this field during error recovery.
5962          */
5963         scmd->result = 0;
5964
5965         hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5966         queue_group = &ctrl_info->queue_groups[hw_queue];
5967
5968         if (pqi_is_logical_device(device)) {
5969                 raid_bypassed = false;
5970                 if (device->raid_bypass_enabled &&
5971                         pqi_is_bypass_eligible_request(scmd) &&
5972                         !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5973                         rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5974                         if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5975                                 raid_bypassed = true;
5976                                 atomic_inc(&device->raid_bypass_cnt);
5977                         }
5978                 }
5979                 if (!raid_bypassed)
5980                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5981         } else {
5982                 if (device->aio_enabled)
5983                         rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5984                 else
5985                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5986         }
5987
5988 out:
5989         if (rc)
5990                 atomic_dec(&device->scsi_cmds_outstanding);
5991
5992         return rc;
5993 }
5994
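/*
 * Count I/O requests still sitting on the driver's per-path submit lists,
 * i.e. queued in the driver but not yet posted to the controller.
 */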
5995 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
5996 {
5997         unsigned int i;
5998         unsigned int path;
5999         unsigned long flags;
6000         unsigned int queued_io_count;
6001         struct pqi_queue_group *queue_group;
6002         struct pqi_io_request *io_request;
6003
6004         queued_io_count = 0;
6005
6006         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6007                 queue_group = &ctrl_info->queue_groups[i];
6008                 for (path = 0; path < 2; path++) {
6009                         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6010                         list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6011                                 queued_io_count++;
6012                         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6013                 }
6014         }
6015
6016         return queued_io_count;
6017 }
6018
6019 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6020 {
6021         unsigned int i;
6022         unsigned int path;
6023         unsigned int nonempty_inbound_queue_count;
6024         struct pqi_queue_group *queue_group;
6025         pqi_index_t iq_pi;
6026         pqi_index_t iq_ci;
6027
6028         nonempty_inbound_queue_count = 0;
6029
6030         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6031                 queue_group = &ctrl_info->queue_groups[i];
6032                 for (path = 0; path < 2; path++) {
6033                         iq_pi = queue_group->iq_pi_copy[path];
6034                         iq_ci = readl(queue_group->iq_ci[path]);
6035                         if (iq_ci != iq_pi)
6036                                 nonempty_inbound_queue_count++;
6037                 }
6038         }
6039
6040         return nonempty_inbound_queue_count;
6041 }
6042
6043 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS        10
6044
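/*
 * Wait for all driver-queued I/O and controller inbound queues to drain,
 * warning periodically and failing if the controller goes offline.
 */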
6045 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6046 {
6047         unsigned long start_jiffies;
6048         unsigned long warning_timeout;
6049         unsigned int queued_io_count;
6050         unsigned int nonempty_inbound_queue_count;
6051         bool displayed_warning;
6052
6053         displayed_warning = false;
6054         start_jiffies = jiffies;
6055         warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6056
6057         while (1) {
6058                 queued_io_count = pqi_queued_io_count(ctrl_info);
6059                 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6060                 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6061                         break;
6062                 pqi_check_ctrl_health(ctrl_info);
6063                 if (pqi_ctrl_offline(ctrl_info))
6064                         return -ENXIO;
6065                 if (time_after(jiffies, warning_timeout)) {
6066                         dev_warn(&ctrl_info->pci_dev->dev,
6067                                 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6068                                 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6069                         displayed_warning = true;
6070                         warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6071                 }
6072                 usleep_range(1000, 2000);
6073         }
6074
6075         if (displayed_warning)
6076                 dev_warn(&ctrl_info->pci_dev->dev,
6077                         "queued I/O drained after waiting for %u seconds\n",
6078                         jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6079
6080         return 0;
6081 }
6082
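/*
 * Complete with DID_RESET any requests for this device that are still on
 * the driver's submit lists and have not been sent to the controller.
 */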
6083 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6084         struct pqi_scsi_dev *device)
6085 {
6086         unsigned int i;
6087         unsigned int path;
6088         struct pqi_queue_group *queue_group;
6089         unsigned long flags;
6090         struct pqi_io_request *io_request;
6091         struct pqi_io_request *next;
6092         struct scsi_cmnd *scmd;
6093         struct pqi_scsi_dev *scsi_device;
6094
6095         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6096                 queue_group = &ctrl_info->queue_groups[i];
6097
6098                 for (path = 0; path < 2; path++) {
6099                         spin_lock_irqsave(
6100                                 &queue_group->submit_lock[path], flags);
6101
6102                         list_for_each_entry_safe(io_request, next,
6103                                 &queue_group->request_list[path],
6104                                 request_list_entry) {
6105
6106                                 scmd = io_request->scmd;
6107                                 if (!scmd)
6108                                         continue;
6109
6110                                 scsi_device = scmd->device->hostdata;
6111                                 if (scsi_device != device)
6112                                         continue;
6113
6114                                 list_del(&io_request->request_list_entry);
6115                                 set_host_byte(scmd, DID_RESET);
6116                                 pqi_free_io_request(io_request);
6117                                 scsi_dma_unmap(scmd);
6118                                 pqi_scsi_done(scmd);
6119                         }
6120
6121                         spin_unlock_irqrestore(
6122                                 &queue_group->submit_lock[path], flags);
6123                 }
6124         }
6125 }
6126
6127 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS     10
6128
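/*
 * Wait for all outstanding commands on a device to complete, warning
 * periodically and giving up after timeout_msecs.
 */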
6129 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6130         struct pqi_scsi_dev *device, unsigned long timeout_msecs)
6131 {
6132         int cmds_outstanding;
6133         unsigned long start_jiffies;
6134         unsigned long warning_timeout;
6135         unsigned long msecs_waiting;
6136
6137         start_jiffies = jiffies;
6138         warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6139
6140         while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
6141                 pqi_check_ctrl_health(ctrl_info);
6142                 if (pqi_ctrl_offline(ctrl_info))
6143                         return -ENXIO;
6144                 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6145                 if (msecs_waiting >= timeout_msecs) {
6146                         dev_err(&ctrl_info->pci_dev->dev,
6147                                 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6148                                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6149                                 device->lun, msecs_waiting / 1000, cmds_outstanding);
6150                         return -ETIMEDOUT;
6151                 }
6152                 if (time_after(jiffies, warning_timeout)) {
6153                         dev_warn(&ctrl_info->pci_dev->dev,
6154                                 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6155                                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6156                                 device->lun, msecs_waiting / 1000, cmds_outstanding);
6157                         warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6158                 }
6159                 usleep_range(1000, 2000);
6160         }
6161
6162         return 0;
6163 }
6164
6165 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6166         void *context)
6167 {
6168         struct completion *waiting = context;
6169
6170         complete(waiting);
6171 }
6172
6173 #define PQI_LUN_RESET_POLL_COMPLETION_SECS      10
6174
6175 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6176         struct pqi_scsi_dev *device, struct completion *wait)
6177 {
6178         int rc;
6179         unsigned int wait_secs;
6180         int cmds_outstanding;
6181
6182         wait_secs = 0;
6183
6184         while (1) {
6185                 if (wait_for_completion_io_timeout(wait,
6186                         PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6187                         rc = 0;
6188                         break;
6189                 }
6190
6191                 pqi_check_ctrl_health(ctrl_info);
6192                 if (pqi_ctrl_offline(ctrl_info)) {
6193                         rc = -ENXIO;
6194                         break;
6195                 }
6196
6197                 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6198                 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
6199                 dev_warn(&ctrl_info->pci_dev->dev,
6200                         "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6201                         ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
6202         }
6203
6204         return rc;
6205 }
6206
6207 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS     30
6208
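/*
 * Issue a SOP LUN reset task management request for the device and wait
 * for the firmware to complete it.
 */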
6209 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6210 {
6211         int rc;
6212         struct pqi_io_request *io_request;
6213         DECLARE_COMPLETION_ONSTACK(wait);
6214         struct pqi_task_management_request *request;
6215
6216         io_request = pqi_alloc_io_request(ctrl_info);
6217         io_request->io_complete_callback = pqi_lun_reset_complete;
6218         io_request->context = &wait;
6219
6220         request = io_request->iu;
6221         memset(request, 0, sizeof(*request));
6222
6223         request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6224         put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6225                 &request->header.iu_length);
6226         put_unaligned_le16(io_request->index, &request->request_id);
6227         memcpy(request->lun_number, device->scsi3addr,
6228                 sizeof(request->lun_number));
6229         request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6230         if (ctrl_info->tmf_iu_timeout_supported)
6231                 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6232
6233         pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6234                 io_request);
6235
6236         rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
6237         if (rc == 0)
6238                 rc = io_request->status;
6239
6240         pqi_free_io_request(io_request);
6241
6242         return rc;
6243 }
6244
6245 #define PQI_LUN_RESET_RETRIES                           3
6246 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS              (10 * 1000)
6247 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS          (10 * 60 * 1000)
6248 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS   (2 * 60 * 1000)
6249
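/*
 * Retry the LUN reset up to PQI_LUN_RESET_RETRIES times, then wait for
 * pending I/O on the device to drain before reporting SUCCESS or FAILED.
 */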
6250 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6251 {
6252         int reset_rc;
6253         int wait_rc;
6254         unsigned int retries;
6255         unsigned long timeout_msecs;
6256
6257         for (retries = 0;;) {
6258                 reset_rc = pqi_lun_reset(ctrl_info, device);
6259                 if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
6260                         break;
6261                 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6262         }
6263
6264         timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6265                 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6266
6267         wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
6268         if (wait_rc && reset_rc == 0)
6269                 reset_rc = wait_rc;
6270
6271         return reset_rc == 0 ? SUCCESS : FAILED;
6272 }
6273
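/*
 * Quiesce the controller, fail any I/O for this device still queued in the
 * driver, drain the inbound queues, and then perform the LUN reset.
 */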
6274 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
6275         struct pqi_scsi_dev *device)
6276 {
6277         int rc;
6278
6279         pqi_ctrl_block_requests(ctrl_info);
6280         pqi_ctrl_wait_until_quiesced(ctrl_info);
6281         pqi_fail_io_queued_for_device(ctrl_info, device);
6282         rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6283         if (rc)
6284                 rc = FAILED;
6285         else
6286                 rc = pqi_lun_reset_with_retries(ctrl_info, device);
6287         pqi_ctrl_unblock_requests(ctrl_info);
6288
6289         return rc;
6290 }
6291
6292 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6293 {
6294         int rc;
6295         struct Scsi_Host *shost;
6296         struct pqi_ctrl_info *ctrl_info;
6297         struct pqi_scsi_dev *device;
6298
6299         shost = scmd->device->host;
6300         ctrl_info = shost_to_hba(shost);
6301         device = scmd->device->hostdata;
6302
6303         mutex_lock(&ctrl_info->lun_reset_mutex);
6304
6305         dev_err(&ctrl_info->pci_dev->dev,
6306                 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6307                 shost->host_no,
6308                 device->bus, device->target, device->lun,
6309                 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6310
6311         pqi_check_ctrl_health(ctrl_info);
6312         if (pqi_ctrl_offline(ctrl_info))
6313                 rc = FAILED;
6314         else
6315                 rc = pqi_device_reset(ctrl_info, device);
6316
6317         dev_err(&ctrl_info->pci_dev->dev,
6318                 "reset of scsi %d:%d:%d:%d: %s\n",
6319                 shost->host_no, device->bus, device->target, device->lun,
6320                 rc == SUCCESS ? "SUCCESS" : "FAILED");
6321
6322         mutex_unlock(&ctrl_info->lun_reset_mutex);
6323
6324         return rc;
6325 }
6326
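/*
 * Bind a newly allocated scsi_device to its pqi_scsi_dev: physical devices
 * are matched by SAS rphy, logical devices by channel/target/lun.  Queue
 * depth, write-same, and allow_restart settings are applied here.
 */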
6327 static int pqi_slave_alloc(struct scsi_device *sdev)
6328 {
6329         struct pqi_scsi_dev *device;
6330         unsigned long flags;
6331         struct pqi_ctrl_info *ctrl_info;
6332         struct scsi_target *starget;
6333         struct sas_rphy *rphy;
6334
6335         ctrl_info = shost_to_hba(sdev->host);
6336
6337         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6338
6339         if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6340                 starget = scsi_target(sdev);
6341                 rphy = target_to_rphy(starget);
6342                 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6343                 if (device) {
6344                         if (device->target_lun_valid) {
6345                                 device->ignore_device = true;
6346                         } else {
6347                                 device->target = sdev_id(sdev);
6348                                 device->lun = sdev->lun;
6349                                 device->target_lun_valid = true;
6350                         }
6351                 }
6352         } else {
6353                 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6354                         sdev_id(sdev), sdev->lun);
6355         }
6356
6357         if (device) {
6358                 sdev->hostdata = device;
6359                 device->sdev = sdev;
6360                 if (device->queue_depth) {
6361                         device->advertised_queue_depth = device->queue_depth;
6362                         scsi_change_queue_depth(sdev,
6363                                 device->advertised_queue_depth);
6364                 }
6365                 if (pqi_is_logical_device(device)) {
6366                         pqi_disable_write_same(sdev);
6367                 } else {
6368                         sdev->allow_restart = 1;
6369                         if (device->device_type == SA_DEVICE_TYPE_NVME)
6370                                 pqi_disable_write_same(sdev);
6371                 }
6372         }
6373
6374         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6375
6376         return 0;
6377 }
6378
6379 static int pqi_map_queues(struct Scsi_Host *shost)
6380 {
6381         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6382
6383         return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6384                                         ctrl_info->pci_dev, 0);
6385 }
6386
6387 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6388 {
6389         return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6390 }
6391
6392 static int pqi_slave_configure(struct scsi_device *sdev)
6393 {
6394         int rc = 0;
6395         struct pqi_scsi_dev *device;
6396
6397         device = sdev->hostdata;
6398         device->devtype = sdev->type;
6399
6400         if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6401                 rc = -ENXIO;
6402                 device->ignore_device = false;
6403         }
6404
6405         return rc;
6406 }
6407
6408 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6409 {
6410         struct pci_dev *pci_dev;
6411         u32 subsystem_vendor;
6412         u32 subsystem_device;
6413         cciss_pci_info_struct pciinfo;
6414
6415         if (!arg)
6416                 return -EINVAL;
6417
6418         pci_dev = ctrl_info->pci_dev;
6419
6420         pciinfo.domain = pci_domain_nr(pci_dev->bus);
6421         pciinfo.bus = pci_dev->bus->number;
6422         pciinfo.dev_fn = pci_dev->devfn;
6423         subsystem_vendor = pci_dev->subsystem_vendor;
6424         subsystem_device = pci_dev->subsystem_device;
6425         pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6426
6427         if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6428                 return -EFAULT;
6429
6430         return 0;
6431 }
6432
6433 static int pqi_getdrivver_ioctl(void __user *arg)
6434 {
6435         u32 version;
6436
6437         if (!arg)
6438                 return -EINVAL;
6439
6440         version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6441                 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6442
6443         if (copy_to_user(arg, &version, sizeof(version)))
6444                 return -EFAULT;
6445
6446         return 0;
6447 }
6448
6449 struct ciss_error_info {
6450         u8      scsi_status;
6451         int     command_status;
6452         size_t  sense_data_length;
6453 };
6454
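/*
 * Translate PQI RAID path error information into the CISS-format status
 * fields expected by the CCISS_PASSTHRU ioctl.
 */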
6455 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6456         struct ciss_error_info *ciss_error_info)
6457 {
6458         int ciss_cmd_status;
6459         size_t sense_data_length;
6460
6461         switch (pqi_error_info->data_out_result) {
6462         case PQI_DATA_IN_OUT_GOOD:
6463                 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6464                 break;
6465         case PQI_DATA_IN_OUT_UNDERFLOW:
6466                 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6467                 break;
6468         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6469                 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6470                 break;
6471         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6472         case PQI_DATA_IN_OUT_BUFFER_ERROR:
6473         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6474         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6475         case PQI_DATA_IN_OUT_ERROR:
6476                 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6477                 break;
6478         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6479         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6480         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6481         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6482         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6483         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6484         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6485         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6486         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6487         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6488                 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6489                 break;
6490         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6491                 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6492                 break;
6493         case PQI_DATA_IN_OUT_ABORTED:
6494                 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6495                 break;
6496         case PQI_DATA_IN_OUT_TIMEOUT:
6497                 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6498                 break;
6499         default:
6500                 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6501                 break;
6502         }
6503
6504         sense_data_length =
6505                 get_unaligned_le16(&pqi_error_info->sense_data_length);
6506         if (sense_data_length == 0)
6507                 sense_data_length =
6508                 get_unaligned_le16(&pqi_error_info->response_data_length);
6509         if (sense_data_length)
6510                 if (sense_data_length > sizeof(pqi_error_info->data))
6511                         sense_data_length = sizeof(pqi_error_info->data);
6512
6513         ciss_error_info->scsi_status = pqi_error_info->status;
6514         ciss_error_info->command_status = ciss_cmd_status;
6515         ciss_error_info->sense_data_length = sense_data_length;
6516 }
6517
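/*
 * Handle CCISS_PASSTHRU: copy in the caller's IOCTL_Command_struct, build a
 * synchronous RAID path request around it (using a single kernel bounce
 * buffer for any data), and copy CISS-format error info and read data back
 * to user space.
 */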
6518 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6519 {
6520         int rc;
6521         char *kernel_buffer = NULL;
6522         u16 iu_length;
6523         size_t sense_data_length;
6524         IOCTL_Command_struct iocommand;
6525         struct pqi_raid_path_request request;
6526         struct pqi_raid_error_info pqi_error_info;
6527         struct ciss_error_info ciss_error_info;
6528
6529         if (pqi_ctrl_offline(ctrl_info))
6530                 return -ENXIO;
6531         if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6532                 return -EBUSY;
6533         if (!arg)
6534                 return -EINVAL;
6535         if (!capable(CAP_SYS_RAWIO))
6536                 return -EPERM;
6537         if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6538                 return -EFAULT;
6539         if (iocommand.buf_size < 1 &&
6540                 iocommand.Request.Type.Direction != XFER_NONE)
6541                 return -EINVAL;
6542         if (iocommand.Request.CDBLen > sizeof(request.cdb))
6543                 return -EINVAL;
6544         if (iocommand.Request.Type.Type != TYPE_CMD)
6545                 return -EINVAL;
6546
6547         switch (iocommand.Request.Type.Direction) {
6548         case XFER_NONE:
6549         case XFER_WRITE:
6550         case XFER_READ:
6551         case XFER_READ | XFER_WRITE:
6552                 break;
6553         default:
6554                 return -EINVAL;
6555         }
6556
6557         if (iocommand.buf_size > 0) {
6558                 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6559                 if (!kernel_buffer)
6560                         return -ENOMEM;
6561                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6562                         if (copy_from_user(kernel_buffer, iocommand.buf,
6563                                 iocommand.buf_size)) {
6564                                 rc = -EFAULT;
6565                                 goto out;
6566                         }
6567                 } else {
6568                         memset(kernel_buffer, 0, iocommand.buf_size);
6569                 }
6570         }
6571
6572         memset(&request, 0, sizeof(request));
6573
6574         request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6575         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6576                 PQI_REQUEST_HEADER_LENGTH;
6577         memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6578                 sizeof(request.lun_number));
6579         memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6580         request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6581
6582         switch (iocommand.Request.Type.Direction) {
6583         case XFER_NONE:
6584                 request.data_direction = SOP_NO_DIRECTION_FLAG;
6585                 break;
6586         case XFER_WRITE:
6587                 request.data_direction = SOP_WRITE_FLAG;
6588                 break;
6589         case XFER_READ:
6590                 request.data_direction = SOP_READ_FLAG;
6591                 break;
6592         case XFER_READ | XFER_WRITE:
6593                 request.data_direction = SOP_BIDIRECTIONAL;
6594                 break;
6595         }
6596
6597         request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6598
6599         if (iocommand.buf_size > 0) {
6600                 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6601
6602                 rc = pqi_map_single(ctrl_info->pci_dev,
6603                         &request.sg_descriptors[0], kernel_buffer,
6604                         iocommand.buf_size, DMA_BIDIRECTIONAL);
6605                 if (rc)
6606                         goto out;
6607
6608                 iu_length += sizeof(request.sg_descriptors[0]);
6609         }
6610
6611         put_unaligned_le16(iu_length, &request.header.iu_length);
6612
6613         if (ctrl_info->raid_iu_timeout_supported)
6614                 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6615
6616         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6617                 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6618
6619         if (iocommand.buf_size > 0)
6620                 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6621                         DMA_BIDIRECTIONAL);
6622
6623         memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6624
6625         if (rc == 0) {
6626                 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6627                 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6628                 iocommand.error_info.CommandStatus =
6629                         ciss_error_info.command_status;
6630                 sense_data_length = ciss_error_info.sense_data_length;
6631                 if (sense_data_length) {
6632                         if (sense_data_length >
6633                                 sizeof(iocommand.error_info.SenseInfo))
6634                                 sense_data_length =
6635                                         sizeof(iocommand.error_info.SenseInfo);
6636                         memcpy(iocommand.error_info.SenseInfo,
6637                                 pqi_error_info.data, sense_data_length);
6638                         iocommand.error_info.SenseLen = sense_data_length;
6639                 }
6640         }
6641
6642         if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6643                 rc = -EFAULT;
6644                 goto out;
6645         }
6646
6647         if (rc == 0 && iocommand.buf_size > 0 &&
6648                 (iocommand.Request.Type.Direction & XFER_READ)) {
6649                 if (copy_to_user(iocommand.buf, kernel_buffer,
6650                         iocommand.buf_size)) {
6651                         rc = -EFAULT;
6652                 }
6653         }
6654
6655 out:
6656         kfree(kernel_buffer);
6657
6658         return rc;
6659 }
6660
6661 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6662                      void __user *arg)
6663 {
6664         int rc;
6665         struct pqi_ctrl_info *ctrl_info;
6666
6667         ctrl_info = shost_to_hba(sdev->host);
6668
6669         switch (cmd) {
6670         case CCISS_DEREGDISK:
6671         case CCISS_REGNEWDISK:
6672         case CCISS_REGNEWD:
6673                 rc = pqi_scan_scsi_devices(ctrl_info);
6674                 break;
6675         case CCISS_GETPCIINFO:
6676                 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6677                 break;
6678         case CCISS_GETDRIVVER:
6679                 rc = pqi_getdrivver_ioctl(arg);
6680                 break;
6681         case CCISS_PASSTHRU:
6682                 rc = pqi_passthru_ioctl(ctrl_info, arg);
6683                 break;
6684         default:
6685                 rc = -EINVAL;
6686                 break;
6687         }
6688
6689         return rc;
6690 }
6691
6692 static ssize_t pqi_firmware_version_show(struct device *dev,
6693         struct device_attribute *attr, char *buffer)
6694 {
6695         struct Scsi_Host *shost;
6696         struct pqi_ctrl_info *ctrl_info;
6697
6698         shost = class_to_shost(dev);
6699         ctrl_info = shost_to_hba(shost);
6700
6701         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6702 }
6703
6704 static ssize_t pqi_driver_version_show(struct device *dev,
6705         struct device_attribute *attr, char *buffer)
6706 {
6707         return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6708 }
6709
6710 static ssize_t pqi_serial_number_show(struct device *dev,
6711         struct device_attribute *attr, char *buffer)
6712 {
6713         struct Scsi_Host *shost;
6714         struct pqi_ctrl_info *ctrl_info;
6715
6716         shost = class_to_shost(dev);
6717         ctrl_info = shost_to_hba(shost);
6718
6719         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6720 }
6721
6722 static ssize_t pqi_model_show(struct device *dev,
6723         struct device_attribute *attr, char *buffer)
6724 {
6725         struct Scsi_Host *shost;
6726         struct pqi_ctrl_info *ctrl_info;
6727
6728         shost = class_to_shost(dev);
6729         ctrl_info = shost_to_hba(shost);
6730
6731         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6732 }
6733
6734 static ssize_t pqi_vendor_show(struct device *dev,
6735         struct device_attribute *attr, char *buffer)
6736 {
6737         struct Scsi_Host *shost;
6738         struct pqi_ctrl_info *ctrl_info;
6739
6740         shost = class_to_shost(dev);
6741         ctrl_info = shost_to_hba(shost);
6742
6743         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6744 }
6745
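/*
 * Any write to the rescan attribute kicks off a device rescan, e.g.
 * (hypothetical host number): echo 1 > /sys/class/scsi_host/host0/rescan
 */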
6746 static ssize_t pqi_host_rescan_store(struct device *dev,
6747         struct device_attribute *attr, const char *buffer, size_t count)
6748 {
6749         struct Scsi_Host *shost = class_to_shost(dev);
6750
6751         pqi_scan_start(shost);
6752
6753         return count;
6754 }
6755
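/*
 * lockup_action is exposed under the Scsi_Host sysfs device.  For example
 * (hypothetical host number):
 *   cat /sys/class/scsi_host/host0/lockup_action
 *   echo <action> > /sys/class/scsi_host/host0/lockup_action
 * where <action> must match a name in pqi_lockup_actions[].
 */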
6756 static ssize_t pqi_lockup_action_show(struct device *dev,
6757         struct device_attribute *attr, char *buffer)
6758 {
6759         int count = 0;
6760         unsigned int i;
6761
6762         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6763                 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6764                         count += scnprintf(buffer + count, PAGE_SIZE - count,
6765                                 "[%s] ", pqi_lockup_actions[i].name);
6766                 else
6767                         count += scnprintf(buffer + count, PAGE_SIZE - count,
6768                                 "%s ", pqi_lockup_actions[i].name);
6769         }
6770
6771         count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6772
6773         return count;
6774 }
6775
6776 static ssize_t pqi_lockup_action_store(struct device *dev,
6777         struct device_attribute *attr, const char *buffer, size_t count)
6778 {
6779         unsigned int i;
6780         char *action_name;
6781         char action_name_buffer[32];
6782
6783         strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6784         action_name = strstrip(action_name_buffer);
6785
6786         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6787                 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6788                         pqi_lockup_action = pqi_lockup_actions[i].action;
6789                         return count;
6790                 }
6791         }
6792
6793         return -EINVAL;
6794 }
6795
6796 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6797         struct device_attribute *attr, char *buffer)
6798 {
6799         struct Scsi_Host *shost = class_to_shost(dev);
6800         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6801
6802         return scnprintf(buffer, 10, "%x\n",
6803                         ctrl_info->enable_stream_detection);
6804 }
6805
6806 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6807         struct device_attribute *attr, const char *buffer, size_t count)
6808 {
6809         struct Scsi_Host *shost = class_to_shost(dev);
6810         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6811         u8 set_stream_detection = 0;
6812
6813         if (kstrtou8(buffer, 0, &set_stream_detection))
6814                 return -EINVAL;
6815
6816         if (set_stream_detection > 0)
6817                 set_stream_detection = 1;
6818
6819         ctrl_info->enable_stream_detection = set_stream_detection;
6820
6821         return count;
6822 }
6823
6824 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6825         struct device_attribute *attr, char *buffer)
6826 {
6827         struct Scsi_Host *shost = class_to_shost(dev);
6828         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6829
6830         return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6831 }
6832
6833 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6834         struct device_attribute *attr, const char *buffer, size_t count)
6835 {
6836         struct Scsi_Host *shost = class_to_shost(dev);
6837         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6838         u8 set_r5_writes = 0;
6839
6840         if (kstrtou8(buffer, 0, &set_r5_writes))
6841                 return -EINVAL;
6842
6843         if (set_r5_writes > 0)
6844                 set_r5_writes = 1;
6845
6846         ctrl_info->enable_r5_writes = set_r5_writes;
6847
6848         return count;
6849 }
6850
6851 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6852         struct device_attribute *attr, char *buffer)
6853 {
6854         struct Scsi_Host *shost = class_to_shost(dev);
6855         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6856
6857         return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6858 }
6859
6860 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6861         struct device_attribute *attr, const char *buffer, size_t count)
6862 {
6863         struct Scsi_Host *shost = class_to_shost(dev);
6864         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6865         u8 set_r6_writes = 0;
6866
6867         if (kstrtou8(buffer, 0, &set_r6_writes))
6868                 return -EINVAL;
6869
6870         if (set_r6_writes > 0)
6871                 set_r6_writes = 1;
6872
6873         ctrl_info->enable_r6_writes = set_r6_writes;
6874
6875         return count;
6876 }
6877
6878 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6879 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6880 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6881 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6882 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6883 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6884 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6885         pqi_lockup_action_store);
6886 static DEVICE_ATTR(enable_stream_detection, 0644,
6887         pqi_host_enable_stream_detection_show,
6888         pqi_host_enable_stream_detection_store);
6889 static DEVICE_ATTR(enable_r5_writes, 0644,
6890         pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6891 static DEVICE_ATTR(enable_r6_writes, 0644,
6892         pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6893
6894 static struct attribute *pqi_shost_attrs[] = {
6895         &dev_attr_driver_version.attr,
6896         &dev_attr_firmware_version.attr,
6897         &dev_attr_model.attr,
6898         &dev_attr_serial_number.attr,
6899         &dev_attr_vendor.attr,
6900         &dev_attr_rescan.attr,
6901         &dev_attr_lockup_action.attr,
6902         &dev_attr_enable_stream_detection.attr,
6903         &dev_attr_enable_r5_writes.attr,
6904         &dev_attr_enable_r6_writes.attr,
6905         NULL
6906 };
6907
6908 ATTRIBUTE_GROUPS(pqi_shost);
6909
6910 static ssize_t pqi_unique_id_show(struct device *dev,
6911         struct device_attribute *attr, char *buffer)
6912 {
6913         struct pqi_ctrl_info *ctrl_info;
6914         struct scsi_device *sdev;
6915         struct pqi_scsi_dev *device;
6916         unsigned long flags;
6917         u8 unique_id[16];
6918
6919         sdev = to_scsi_device(dev);
6920         ctrl_info = shost_to_hba(sdev->host);
6921
6922         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6923
6924         device = sdev->hostdata;
6925         if (!device) {
6926                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6927                 return -ENODEV;
6928         }
6929
6930         if (device->is_physical_device)
6931                 memcpy(unique_id, device->wwid, sizeof(device->wwid));
6932         else
6933                 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6934
6935         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6936
6937         return scnprintf(buffer, PAGE_SIZE,
6938                 "%02X%02X%02X%02X%02X%02X%02X%02X"
6939                 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
6940                 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6941                 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6942                 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6943                 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6944 }
6945
6946 static ssize_t pqi_lunid_show(struct device *dev,
6947         struct device_attribute *attr, char *buffer)
6948 {
6949         struct pqi_ctrl_info *ctrl_info;
6950         struct scsi_device *sdev;
6951         struct pqi_scsi_dev *device;
6952         unsigned long flags;
6953         u8 lunid[8];
6954
6955         sdev = to_scsi_device(dev);
6956         ctrl_info = shost_to_hba(sdev->host);
6957
6958         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6959
6960         device = sdev->hostdata;
6961         if (!device) {
6962                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6963                 return -ENODEV;
6964         }
6965
6966         memcpy(lunid, device->scsi3addr, sizeof(lunid));
6967
6968         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6969
6970         return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6971 }
6972
6973 #define MAX_PATHS       8
6974
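/*
 * Emit one line per known path to the device: the SCSI nexus, the device
 * type, and whether the path is the active one. For exposed physical
 * disks the connector (PORT), enclosure BOX, and BAY are appended as well.
 */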
6975 static ssize_t pqi_path_info_show(struct device *dev,
6976         struct device_attribute *attr, char *buf)
6977 {
6978         struct pqi_ctrl_info *ctrl_info;
6979         struct scsi_device *sdev;
6980         struct pqi_scsi_dev *device;
6981         unsigned long flags;
6982         int i;
6983         int output_len = 0;
6984         u8 box;
6985         u8 bay;
6986         u8 path_map_index;
6987         char *active;
6988         u8 phys_connector[2];
6989
6990         sdev = to_scsi_device(dev);
6991         ctrl_info = shost_to_hba(sdev->host);
6992
6993         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6994
6995         device = sdev->hostdata;
6996         if (!device) {
6997                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6998                 return -ENODEV;
6999         }
7000
7001         bay = device->bay;
7002         for (i = 0; i < MAX_PATHS; i++) {
7003                 path_map_index = 1 << i;
7004                 if (i == device->active_path_index)
7005                         active = "Active";
7006                 else if (device->path_map & path_map_index)
7007                         active = "Inactive";
7008                 else
7009                         continue;
7010
7011                 output_len += scnprintf(buf + output_len,
7012                                         PAGE_SIZE - output_len,
7013                                         "[%d:%d:%d:%d] %20.20s ",
7014                                         ctrl_info->scsi_host->host_no,
7015                                         device->bus, device->target,
7016                                         device->lun,
7017                                         scsi_device_type(device->devtype));
7018
7019                 if (device->devtype == TYPE_RAID ||
7020                         pqi_is_logical_device(device))
7021                         goto end_buffer;
7022
7023                 memcpy(&phys_connector, &device->phys_connector[i],
7024                         sizeof(phys_connector));
7025                 if (phys_connector[0] < '0')
7026                         phys_connector[0] = '0';
7027                 if (phys_connector[1] < '0')
7028                         phys_connector[1] = '0';
7029
7030                 output_len += scnprintf(buf + output_len,
7031                                         PAGE_SIZE - output_len,
7032                                         "PORT: %.2s ", phys_connector);
7033
7034                 box = device->box[i];
7035                 if (box != 0 && box != 0xFF)
7036                         output_len += scnprintf(buf + output_len,
7037                                                 PAGE_SIZE - output_len,
7038                                                 "BOX: %hhu ", box);
7039
7040                 if ((device->devtype == TYPE_DISK ||
7041                         device->devtype == TYPE_ZBC) &&
7042                         pqi_expose_device(device))
7043                         output_len += scnprintf(buf + output_len,
7044                                                 PAGE_SIZE - output_len,
7045                                                 "BAY: %hhu ", bay);
7046
7047 end_buffer:
7048                 output_len += scnprintf(buf + output_len,
7049                                         PAGE_SIZE - output_len,
7050                                         "%s\n", active);
7051         }
7052
7053         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7054
7055         return output_len;
7056 }
7057
7058 static ssize_t pqi_sas_address_show(struct device *dev,
7059         struct device_attribute *attr, char *buffer)
7060 {
7061         struct pqi_ctrl_info *ctrl_info;
7062         struct scsi_device *sdev;
7063         struct pqi_scsi_dev *device;
7064         unsigned long flags;
7065         u64 sas_address;
7066
7067         sdev = to_scsi_device(dev);
7068         ctrl_info = shost_to_hba(sdev->host);
7069
7070         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7071
7072         device = sdev->hostdata;
7073         if (!device) {
7074                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7075                 return -ENODEV;
7076         }
7077
7078         sas_address = device->sas_address;
7079
7080         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7081
7082         return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7083 }
7084
7085 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7086         struct device_attribute *attr, char *buffer)
7087 {
7088         struct pqi_ctrl_info *ctrl_info;
7089         struct scsi_device *sdev;
7090         struct pqi_scsi_dev *device;
7091         unsigned long flags;
7092
7093         sdev = to_scsi_device(dev);
7094         ctrl_info = shost_to_hba(sdev->host);
7095
7096         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7097
7098         device = sdev->hostdata;
7099         if (!device) {
7100                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7101                 return -ENODEV;
7102         }
7103
7104         buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7105         buffer[1] = '\n';
7106         buffer[2] = '\0';
7107
7108         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7109
7110         return 2;
7111 }
7112
7113 static ssize_t pqi_raid_level_show(struct device *dev,
7114         struct device_attribute *attr, char *buffer)
7115 {
7116         struct pqi_ctrl_info *ctrl_info;
7117         struct scsi_device *sdev;
7118         struct pqi_scsi_dev *device;
7119         unsigned long flags;
7120         char *raid_level;
7121
7122         sdev = to_scsi_device(dev);
7123         ctrl_info = shost_to_hba(sdev->host);
7124
7125         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7126
7127         device = sdev->hostdata;
7128         if (!device) {
7129                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7130                 return -ENODEV;
7131         }
7132
7133         if (pqi_is_logical_device(device))
7134                 raid_level = pqi_raid_level_to_string(device->raid_level);
7135         else
7136                 raid_level = "N/A";
7137
7138         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7139
7140         return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7141 }
7142
7143 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7144         struct device_attribute *attr, char *buffer)
7145 {
7146         struct pqi_ctrl_info *ctrl_info;
7147         struct scsi_device *sdev;
7148         struct pqi_scsi_dev *device;
7149         unsigned long flags;
7150         int raid_bypass_cnt;
7151
7152         sdev = to_scsi_device(dev);
7153         ctrl_info = shost_to_hba(sdev->host);
7154
7155         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7156
7157         device = sdev->hostdata;
7158         if (!device) {
7159                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7160                 return -ENODEV;
7161         }
7162
7163         raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
7164
7165         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7166
7167         return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
7168 }
7169
7170 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7171                 struct device_attribute *attr, char *buf)
7172 {
7173         struct pqi_ctrl_info *ctrl_info;
7174         struct scsi_device *sdev;
7175         struct pqi_scsi_dev *device;
7176         unsigned long flags;
7177         int output_len = 0;
7178
7179         sdev = to_scsi_device(dev);
7180         ctrl_info = shost_to_hba(sdev->host);
7181
7182         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7183
7184         device = sdev->hostdata;
7185         if (!device) {
7186                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7187                 return -ENODEV;
7188         }
7189
7190         output_len = scnprintf(buf, PAGE_SIZE, "%d\n",
7191                                 device->ncq_prio_enable);
7192         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7193
7194         return output_len;
7195 }
7196
7197 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7198                         struct device_attribute *attr,
7199                         const char *buf, size_t count)
7200 {
7201         struct pqi_ctrl_info *ctrl_info;
7202         struct scsi_device *sdev;
7203         struct pqi_scsi_dev *device;
7204         unsigned long flags;
7205         u8 ncq_prio_enable = 0;
7206
7207         if (kstrtou8(buf, 0, &ncq_prio_enable))
7208                 return -EINVAL;
7209
7210         sdev = to_scsi_device(dev);
7211         ctrl_info = shost_to_hba(sdev->host);
7212
7213         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7214
7215         device = sdev->hostdata;
7216
7217         if (!device) {
7218                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7219                 return -ENODEV;
7220         }
7221
7222         if (!device->ncq_prio_support ||
7223                 !device->is_physical_device) {
7224                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7225                 return -EINVAL;
7226         }
7227
7228         device->ncq_prio_enable = ncq_prio_enable;
7229
7230         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7231
7232         return strlen(buf);
7233 }
7234
7235 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7236 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7237 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7238 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7239 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7240 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7241 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7242 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7243                 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7244
7245 static struct attribute *pqi_sdev_attrs[] = {
7246         &dev_attr_lunid.attr,
7247         &dev_attr_unique_id.attr,
7248         &dev_attr_path_info.attr,
7249         &dev_attr_sas_address.attr,
7250         &dev_attr_ssd_smart_path_enabled.attr,
7251         &dev_attr_raid_level.attr,
7252         &dev_attr_raid_bypass_cnt.attr,
7253         &dev_attr_sas_ncq_prio_enable.attr,
7254         NULL
7255 };
7256
7257 ATTRIBUTE_GROUPS(pqi_sdev);
7258
7259 static struct scsi_host_template pqi_driver_template = {
7260         .module = THIS_MODULE,
7261         .name = DRIVER_NAME_SHORT,
7262         .proc_name = DRIVER_NAME_SHORT,
7263         .queuecommand = pqi_scsi_queue_command,
7264         .scan_start = pqi_scan_start,
7265         .scan_finished = pqi_scan_finished,
7266         .this_id = -1,
7267         .eh_device_reset_handler = pqi_eh_device_reset_handler,
7268         .ioctl = pqi_ioctl,
7269         .slave_alloc = pqi_slave_alloc,
7270         .slave_configure = pqi_slave_configure,
7271         .map_queues = pqi_map_queues,
7272         .sdev_groups = pqi_sdev_groups,
7273         .shost_groups = pqi_shost_groups,
7274         .cmd_size = sizeof(struct pqi_cmd_priv),
7275 };
7276
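/*
 * Allocate and register the Scsi_Host and the associated SAS transport
 * host. Only a pointer to ctrl_info is stored in the host's private data
 * (hostdata[0]), which is why the allocation asks for sizeof(ctrl_info)
 * rather than sizeof(*ctrl_info).
 */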
7277 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7278 {
7279         int rc;
7280         struct Scsi_Host *shost;
7281
7282         shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7283         if (!shost) {
7284                 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7285                 return -ENOMEM;
7286         }
7287
7288         shost->io_port = 0;
7289         shost->n_io_port = 0;
7290         shost->this_id = -1;
7291         shost->max_channel = PQI_MAX_BUS;
7292         shost->max_cmd_len = MAX_COMMAND_SIZE;
7293         shost->max_lun = ~0;
7294         shost->max_id = ~0;
7295         shost->max_sectors = ctrl_info->max_sectors;
7296         shost->can_queue = ctrl_info->scsi_ml_can_queue;
7297         shost->cmd_per_lun = shost->can_queue;
7298         shost->sg_tablesize = ctrl_info->sg_tablesize;
7299         shost->transportt = pqi_sas_transport_template;
7300         shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7301         shost->unique_id = shost->irq;
7302         shost->nr_hw_queues = ctrl_info->num_queue_groups;
7303         shost->host_tagset = 1;
7304         shost->hostdata[0] = (unsigned long)ctrl_info;
7305
7306         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7307         if (rc) {
7308                 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7309                 goto free_host;
7310         }
7311
7312         rc = pqi_add_sas_host(shost, ctrl_info);
7313         if (rc) {
7314                 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7315                 goto remove_host;
7316         }
7317
7318         ctrl_info->scsi_host = shost;
7319
7320         return 0;
7321
7322 remove_host:
7323         scsi_remove_host(shost);
7324 free_host:
7325         scsi_host_put(shost);
7326
7327         return rc;
7328 }
7329
7330 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7331 {
7332         struct Scsi_Host *shost;
7333
7334         pqi_delete_sas_host(ctrl_info);
7335
7336         shost = ctrl_info->scsi_host;
7337         if (!shost)
7338                 return;
7339
7340         scsi_remove_host(shost);
7341         scsi_host_put(shost);
7342 }
7343
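/*
 * Poll the PQI device reset register until the reset action reads back as
 * completed. Give up early if the controller goes offline, or once the
 * timeout advertised by the controller (max_reset_timeout, treated here
 * as 100-millisecond units) has elapsed.
 */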
7344 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7345 {
7346         int rc = 0;
7347         struct pqi_device_registers __iomem *pqi_registers;
7348         unsigned long timeout;
7349         unsigned int timeout_msecs;
7350         union pqi_reset_register reset_reg;
7351
7352         pqi_registers = ctrl_info->pqi_registers;
7353         timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7354         timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7355
7356         while (1) {
7357                 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7358                 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7359                 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7360                         break;
7361                 pqi_check_ctrl_health(ctrl_info);
7362                 if (pqi_ctrl_offline(ctrl_info)) {
7363                         rc = -ENXIO;
7364                         break;
7365                 }
7366                 if (time_after(jiffies, timeout)) {
7367                         rc = -ETIMEDOUT;
7368                         break;
7369                 }
7370         }
7371
7372         return rc;
7373 }
7374
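/*
 * Issue a PQI hard reset: quiesce via SIS first if the firmware supports
 * it, then write the reset request to the PQI device reset register and
 * wait for the controller to report completion.
 */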
7375 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7376 {
7377         int rc;
7378         union pqi_reset_register reset_reg;
7379
7380         if (ctrl_info->pqi_reset_quiesce_supported) {
7381                 rc = sis_pqi_reset_quiesce(ctrl_info);
7382                 if (rc) {
7383                         dev_err(&ctrl_info->pci_dev->dev,
7384                                 "PQI reset failed during quiesce with error %d\n", rc);
7385                         return rc;
7386                 }
7387         }
7388
7389         reset_reg.all_bits = 0;
7390         reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7391         reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7392
7393         writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7394
7395         rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7396         if (rc)
7397                 dev_err(&ctrl_info->pci_dev->dev,
7398                         "PQI reset failed with error %d\n", rc);
7399
7400         return rc;
7401 }
7402
7403 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7404 {
7405         int rc;
7406         struct bmic_sense_subsystem_info *sense_info;
7407
7408         sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7409         if (!sense_info)
7410                 return -ENOMEM;
7411
7412         rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7413         if (rc)
7414                 goto out;
7415
7416         memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7417                 sizeof(sense_info->ctrl_serial_number));
7418         ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7419
7420 out:
7421         kfree(sense_info);
7422
7423         return rc;
7424 }
7425
7426 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7427 {
7428         int rc;
7429         struct bmic_identify_controller *identify;
7430
7431         identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7432         if (!identify)
7433                 return -ENOMEM;
7434
7435         rc = pqi_identify_controller(ctrl_info, identify);
7436         if (rc)
7437                 goto out;
7438
7439         if (get_unaligned_le32(&identify->extra_controller_flags) &
7440                 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7441                 memcpy(ctrl_info->firmware_version,
7442                         identify->firmware_version_long,
7443                         sizeof(identify->firmware_version_long));
7444         } else {
7445                 memcpy(ctrl_info->firmware_version,
7446                         identify->firmware_version_short,
7447                         sizeof(identify->firmware_version_short));
7448                 ctrl_info->firmware_version
7449                         [sizeof(identify->firmware_version_short)] = '\0';
7450                 snprintf(ctrl_info->firmware_version +
7451                         strlen(ctrl_info->firmware_version),
7452                         sizeof(ctrl_info->firmware_version) -
7453                         sizeof(identify->firmware_version_short),
7454                         "-%u",
7455                         get_unaligned_le16(&identify->firmware_build_number));
7456         }
7457
7458         memcpy(ctrl_info->model, identify->product_id,
7459                 sizeof(identify->product_id));
7460         ctrl_info->model[sizeof(identify->product_id)] = '\0';
7461
7462         memcpy(ctrl_info->vendor, identify->vendor_id,
7463                 sizeof(identify->vendor_id));
7464         ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7465
7466 out:
7467         kfree(identify);
7468
7469         return rc;
7470 }
7471
7472 struct pqi_config_table_section_info {
7473         struct pqi_ctrl_info *ctrl_info;
7474         void            *section;
7475         u32             section_offset;
7476         void __iomem    *section_iomem_addr;
7477 };
7478
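/*
 * The firmware features section of the config table contains three byte
 * arrays of num_elements bytes each: the features the firmware supports,
 * the features the host requests, and the features the firmware has
 * actually enabled. The helpers below index into those arrays by feature
 * bit number.
 */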
7479 static inline bool pqi_is_firmware_feature_supported(
7480         struct pqi_config_table_firmware_features *firmware_features,
7481         unsigned int bit_position)
7482 {
7483         unsigned int byte_index;
7484
7485         byte_index = bit_position / BITS_PER_BYTE;
7486
7487         if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7488                 return false;
7489
7490         return firmware_features->features_supported[byte_index] &
7491                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7492 }
7493
7494 static inline bool pqi_is_firmware_feature_enabled(
7495         struct pqi_config_table_firmware_features *firmware_features,
7496         void __iomem *firmware_features_iomem_addr,
7497         unsigned int bit_position)
7498 {
7499         unsigned int byte_index;
7500         u8 __iomem *features_enabled_iomem_addr;
7501
7502         byte_index = (bit_position / BITS_PER_BYTE) +
7503                 (le16_to_cpu(firmware_features->num_elements) * 2);
7504
7505         features_enabled_iomem_addr = firmware_features_iomem_addr +
7506                 offsetof(struct pqi_config_table_firmware_features,
7507                         features_supported) + byte_index;
7508
7509         return *((__force u8 *)features_enabled_iomem_addr) &
7510                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7511 }
7512
7513 static inline void pqi_request_firmware_feature(
7514         struct pqi_config_table_firmware_features *firmware_features,
7515         unsigned int bit_position)
7516 {
7517         unsigned int byte_index;
7518
7519         byte_index = (bit_position / BITS_PER_BYTE) +
7520                 le16_to_cpu(firmware_features->num_elements);
7521
7522         firmware_features->features_supported[byte_index] |=
7523                 (1 << (bit_position % BITS_PER_BYTE));
7524 }
7525
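/*
 * Notify the controller that the host has updated the given range of
 * config table sections by sending a synchronous vendor-general request
 * down the RAID path.
 */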
7526 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7527         u16 first_section, u16 last_section)
7528 {
7529         struct pqi_vendor_general_request request;
7530
7531         memset(&request, 0, sizeof(request));
7532
7533         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7534         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7535                 &request.header.iu_length);
7536         put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7537                 &request.function_code);
7538         put_unaligned_le16(first_section,
7539                 &request.data.config_table_update.first_section);
7540         put_unaligned_le16(last_section,
7541                 &request.data.config_table_update.last_section);
7542
7543         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7544 }
7545
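/*
 * Copy the host-requested feature bytes out to the controller's copy of
 * the config table, advertise the highest feature bit this driver knows
 * about when the "maximum known feature" field is supported, and then
 * ask the firmware to act on the updated firmware features section.
 */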
7546 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7547         struct pqi_config_table_firmware_features *firmware_features,
7548         void __iomem *firmware_features_iomem_addr)
7549 {
7550         void *features_requested;
7551         void __iomem *features_requested_iomem_addr;
7552         void __iomem *host_max_known_feature_iomem_addr;
7553
7554         features_requested = firmware_features->features_supported +
7555                 le16_to_cpu(firmware_features->num_elements);
7556
7557         features_requested_iomem_addr = firmware_features_iomem_addr +
7558                 (features_requested - (void *)firmware_features);
7559
7560         memcpy_toio(features_requested_iomem_addr, features_requested,
7561                 le16_to_cpu(firmware_features->num_elements));
7562
7563         if (pqi_is_firmware_feature_supported(firmware_features,
7564                 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7565                 host_max_known_feature_iomem_addr =
7566                         features_requested_iomem_addr +
7567                         (le16_to_cpu(firmware_features->num_elements) * 2) +
7568                         sizeof(__le16);
7569                 writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7570                         host_max_known_feature_iomem_addr);
7571         }
7572
7573         return pqi_config_table_update(ctrl_info,
7574                 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7575                 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7576 }
7577
7578 struct pqi_firmware_feature {
7579         char            *feature_name;
7580         unsigned int    feature_bit;
7581         bool            supported;
7582         bool            enabled;
7583         void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7584                 struct pqi_firmware_feature *firmware_feature);
7585 };
7586
7587 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7588         struct pqi_firmware_feature *firmware_feature)
7589 {
7590         if (!firmware_feature->supported) {
7591                 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7592                         firmware_feature->feature_name);
7593                 return;
7594         }
7595
7596         if (firmware_feature->enabled) {
7597                 dev_info(&ctrl_info->pci_dev->dev,
7598                         "%s enabled\n", firmware_feature->feature_name);
7599                 return;
7600         }
7601
7602         dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7603                 firmware_feature->feature_name);
7604 }
7605
7606 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7607         struct pqi_firmware_feature *firmware_feature)
7608 {
7609         switch (firmware_feature->feature_bit) {
7610         case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7611                 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7612                 break;
7613         case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7614                 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7615                 break;
7616         case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7617                 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7618                 break;
7619         case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7620                 ctrl_info->soft_reset_handshake_supported =
7621                         firmware_feature->enabled &&
7622                         pqi_read_soft_reset_status(ctrl_info);
7623                 break;
7624         case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7625                 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7626                 break;
7627         case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7628                 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7629                 break;
7630         case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7631                 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7632                 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7633                 break;
7634         case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7635                 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7636                 break;
7637         }
7638
7639         pqi_firmware_feature_status(ctrl_info, firmware_feature);
7640 }
7641
7642 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7643         struct pqi_firmware_feature *firmware_feature)
7644 {
7645         if (firmware_feature->feature_status)
7646                 firmware_feature->feature_status(ctrl_info, firmware_feature);
7647 }
7648
7649 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7650
7651 static struct pqi_firmware_feature pqi_firmware_features[] = {
7652         {
7653                 .feature_name = "Online Firmware Activation",
7654                 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7655                 .feature_status = pqi_firmware_feature_status,
7656         },
7657         {
7658                 .feature_name = "Serial Management Protocol",
7659                 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7660                 .feature_status = pqi_firmware_feature_status,
7661         },
7662         {
7663                 .feature_name = "Maximum Known Feature",
7664                 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7665                 .feature_status = pqi_firmware_feature_status,
7666         },
7667         {
7668                 .feature_name = "RAID 0 Read Bypass",
7669                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7670                 .feature_status = pqi_firmware_feature_status,
7671         },
7672         {
7673                 .feature_name = "RAID 1 Read Bypass",
7674                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7675                 .feature_status = pqi_firmware_feature_status,
7676         },
7677         {
7678                 .feature_name = "RAID 5 Read Bypass",
7679                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7680                 .feature_status = pqi_firmware_feature_status,
7681         },
7682         {
7683                 .feature_name = "RAID 6 Read Bypass",
7684                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7685                 .feature_status = pqi_firmware_feature_status,
7686         },
7687         {
7688                 .feature_name = "RAID 0 Write Bypass",
7689                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7690                 .feature_status = pqi_firmware_feature_status,
7691         },
7692         {
7693                 .feature_name = "RAID 1 Write Bypass",
7694                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7695                 .feature_status = pqi_ctrl_update_feature_flags,
7696         },
7697         {
7698                 .feature_name = "RAID 5 Write Bypass",
7699                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7700                 .feature_status = pqi_ctrl_update_feature_flags,
7701         },
7702         {
7703                 .feature_name = "RAID 6 Write Bypass",
7704                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7705                 .feature_status = pqi_ctrl_update_feature_flags,
7706         },
7707         {
7708                 .feature_name = "New Soft Reset Handshake",
7709                 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7710                 .feature_status = pqi_ctrl_update_feature_flags,
7711         },
7712         {
7713                 .feature_name = "RAID IU Timeout",
7714                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7715                 .feature_status = pqi_ctrl_update_feature_flags,
7716         },
7717         {
7718                 .feature_name = "TMF IU Timeout",
7719                 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7720                 .feature_status = pqi_ctrl_update_feature_flags,
7721         },
7722         {
7723                 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7724                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7725                 .feature_status = pqi_firmware_feature_status,
7726         },
7727         {
7728                 .feature_name = "Firmware Triage",
7729                 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7730                 .feature_status = pqi_ctrl_update_feature_flags,
7731         },
7732         {
7733                 .feature_name = "RPL Extended Formats 4 and 5",
7734                 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7735                 .feature_status = pqi_ctrl_update_feature_flags,
7736         },
7737 };
7738
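/*
 * Negotiate firmware features: mark which entries in
 * pqi_firmware_features[] the controller supports, request every
 * supported feature, ask the firmware to enable them, then record which
 * features were actually enabled and run each feature's status callback.
 */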
7739 static void pqi_process_firmware_features(
7740         struct pqi_config_table_section_info *section_info)
7741 {
7742         int rc;
7743         struct pqi_ctrl_info *ctrl_info;
7744         struct pqi_config_table_firmware_features *firmware_features;
7745         void __iomem *firmware_features_iomem_addr;
7746         unsigned int i;
7747         unsigned int num_features_supported;
7748
7749         ctrl_info = section_info->ctrl_info;
7750         firmware_features = section_info->section;
7751         firmware_features_iomem_addr = section_info->section_iomem_addr;
7752
7753         for (i = 0, num_features_supported = 0;
7754                 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7755                 if (pqi_is_firmware_feature_supported(firmware_features,
7756                         pqi_firmware_features[i].feature_bit)) {
7757                         pqi_firmware_features[i].supported = true;
7758                         num_features_supported++;
7759                 } else {
7760                         pqi_firmware_feature_update(ctrl_info,
7761                                 &pqi_firmware_features[i]);
7762                 }
7763         }
7764
7765         if (num_features_supported == 0)
7766                 return;
7767
7768         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7769                 if (!pqi_firmware_features[i].supported)
7770                         continue;
7771                 pqi_request_firmware_feature(firmware_features,
7772                         pqi_firmware_features[i].feature_bit);
7773         }
7774
7775         rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7776                 firmware_features_iomem_addr);
7777         if (rc) {
7778                 dev_err(&ctrl_info->pci_dev->dev,
7779                         "failed to enable firmware features in PQI configuration table\n");
7780                 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7781                         if (!pqi_firmware_features[i].supported)
7782                                 continue;
7783                         pqi_firmware_feature_update(ctrl_info,
7784                                 &pqi_firmware_features[i]);
7785                 }
7786                 return;
7787         }
7788
7789         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7790                 if (!pqi_firmware_features[i].supported)
7791                         continue;
7792                 if (pqi_is_firmware_feature_enabled(firmware_features,
7793                         firmware_features_iomem_addr,
7794                         pqi_firmware_features[i].feature_bit)) {
7795                                 pqi_firmware_features[i].enabled = true;
7796                 }
7797                 pqi_firmware_feature_update(ctrl_info,
7798                         &pqi_firmware_features[i]);
7799         }
7800 }
7801
7802 static void pqi_init_firmware_features(void)
7803 {
7804         unsigned int i;
7805
7806         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7807                 pqi_firmware_features[i].supported = false;
7808                 pqi_firmware_features[i].enabled = false;
7809         }
7810 }
7811
7812 static void pqi_process_firmware_features_section(
7813         struct pqi_config_table_section_info *section_info)
7814 {
7815         mutex_lock(&pqi_firmware_features_mutex);
7816         pqi_init_firmware_features();
7817         pqi_process_firmware_features(section_info);
7818         mutex_unlock(&pqi_firmware_features_mutex);
7819 }
7820
7821 /*
7822  * Reset all controller settings that can be initialized during the processing
7823  * of the PQI Configuration Table.
7824  */
7825
7826 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7827 {
7828         ctrl_info->heartbeat_counter = NULL;
7829         ctrl_info->soft_reset_status = NULL;
7830         ctrl_info->soft_reset_handshake_supported = false;
7831         ctrl_info->enable_r1_writes = false;
7832         ctrl_info->enable_r5_writes = false;
7833         ctrl_info->enable_r6_writes = false;
7834         ctrl_info->raid_iu_timeout_supported = false;
7835         ctrl_info->tmf_iu_timeout_supported = false;
7836         ctrl_info->firmware_triage_supported = false;
7837         ctrl_info->rpl_extended_format_4_5_supported = false;
7838 }
7839
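/*
 * Copy the PQI config table out of BAR memory and walk its section list,
 * capturing pointers to the heartbeat counter and soft reset status and
 * deferring the firmware features section until all other sections have
 * been handled.
 */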
7840 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7841 {
7842         u32 table_length;
7843         u32 section_offset;
7844         bool firmware_feature_section_present;
7845         void __iomem *table_iomem_addr;
7846         struct pqi_config_table *config_table;
7847         struct pqi_config_table_section_header *section;
7848         struct pqi_config_table_section_info section_info;
7849         struct pqi_config_table_section_info feature_section_info;
7850
7851         table_length = ctrl_info->config_table_length;
7852         if (table_length == 0)
7853                 return 0;
7854
7855         config_table = kmalloc(table_length, GFP_KERNEL);
7856         if (!config_table) {
7857                 dev_err(&ctrl_info->pci_dev->dev,
7858                         "failed to allocate memory for PQI configuration table\n");
7859                 return -ENOMEM;
7860         }
7861
7862         /*
7863          * Copy the config table contents from I/O memory space into the
7864          * temporary buffer.
7865          */
7866         table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7867         memcpy_fromio(config_table, table_iomem_addr, table_length);
7868
7869         firmware_feature_section_present = false;
7870         section_info.ctrl_info = ctrl_info;
7871         section_offset = get_unaligned_le32(&config_table->first_section_offset);
7872
7873         while (section_offset) {
7874                 section = (void *)config_table + section_offset;
7875
7876                 section_info.section = section;
7877                 section_info.section_offset = section_offset;
7878                 section_info.section_iomem_addr = table_iomem_addr + section_offset;
7879
7880                 switch (get_unaligned_le16(&section->section_id)) {
7881                 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7882                         firmware_feature_section_present = true;
7883                         feature_section_info = section_info;
7884                         break;
7885                 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7886                         if (pqi_disable_heartbeat)
7887                                 dev_warn(&ctrl_info->pci_dev->dev,
7888                                 "heartbeat disabled by module parameter\n");
7889                         else
7890                                 ctrl_info->heartbeat_counter =
7891                                         table_iomem_addr +
7892                                         section_offset +
7893                                         offsetof(struct pqi_config_table_heartbeat,
7894                                                 heartbeat_counter);
7895                         break;
7896                 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7897                         ctrl_info->soft_reset_status =
7898                                 table_iomem_addr +
7899                                 section_offset +
7900                                 offsetof(struct pqi_config_table_soft_reset,
7901                                         soft_reset_status);
7902                         break;
7903                 }
7904
7905                 section_offset = get_unaligned_le16(&section->next_section_offset);
7906         }
7907
7908         /*
7909          * We process the firmware feature section after all other sections
7910          * have been processed so that the feature bit callbacks can take
7911          * into account the settings configured by other sections.
7912          */
7913         if (firmware_feature_section_present)
7914                 pqi_process_firmware_features_section(&feature_section_info);
7915
7916         kfree(config_table);
7917
7918         return 0;
7919 }
7920
7921 /* Switches the controller from PQI mode back into SIS mode. */
7922
7923 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7924 {
7925         int rc;
7926
7927         pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7928         rc = pqi_reset(ctrl_info);
7929         if (rc)
7930                 return rc;
7931         rc = sis_reenable_sis_mode(ctrl_info);
7932         if (rc) {
7933                 dev_err(&ctrl_info->pci_dev->dev,
7934                         "re-enabling SIS mode failed with error %d\n", rc);
7935                 return rc;
7936         }
7937         pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7938
7939         return 0;
7940 }
7941
7942 /*
7943  * If the controller isn't already in SIS mode, this function forces it into
7944  * SIS mode.
7945  */
7946
7947 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7948 {
7949         if (!sis_is_firmware_running(ctrl_info))
7950                 return -ENXIO;
7951
7952         if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7953                 return 0;
7954
7955         if (sis_is_kernel_up(ctrl_info)) {
7956                 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7957                 return 0;
7958         }
7959
7960         return pqi_revert_to_sis_mode(ctrl_info);
7961 }
7962
7963 static void pqi_perform_lockup_action(void)
7964 {
7965         switch (pqi_lockup_action) {
7966         case PANIC:
7967                 panic("FATAL: Smart Family Controller lockup detected");
7968                 break;
7969         case REBOOT:
7970                 emergency_restart();
7971                 break;
7972         case NONE:
7973         default:
7974                 break;
7975         }
7976 }
7977
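/*
 * Cold-start controller initialization: make sure the controller is in
 * SIS mode (or soft-reset it for kdump), transition it to PQI mode,
 * create the admin and operational queues, wire up MSI-X interrupts,
 * process the config table, and register with the SCSI midlayer before
 * kicking off the initial device scan.
 */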
7978 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7979 {
7980         int rc;
7981         u32 product_id;
7982
7983         if (reset_devices) {
7984                 if (pqi_is_fw_triage_supported(ctrl_info)) {
7985                         rc = sis_wait_for_fw_triage_completion(ctrl_info);
7986                         if (rc)
7987                                 return rc;
7988                 }
7989                 sis_soft_reset(ctrl_info);
7990                 ssleep(PQI_POST_RESET_DELAY_SECS);
7991         } else {
7992                 rc = pqi_force_sis_mode(ctrl_info);
7993                 if (rc)
7994                         return rc;
7995         }
7996
7997         /*
7998          * Wait until the controller is ready to start accepting SIS
7999          * commands.
8000          */
8001         rc = sis_wait_for_ctrl_ready(ctrl_info);
8002         if (rc) {
8003                 if (reset_devices) {
8004                         dev_err(&ctrl_info->pci_dev->dev,
8005                                 "kdump init failed with error %d\n", rc);
8006                         pqi_lockup_action = REBOOT;
8007                         pqi_perform_lockup_action();
8008                 }
8009                 return rc;
8010         }
8011
8012         /*
8013          * Get the controller properties.  This allows us to determine
8014          * whether or not it supports PQI mode.
8015          */
8016         rc = sis_get_ctrl_properties(ctrl_info);
8017         if (rc) {
8018                 dev_err(&ctrl_info->pci_dev->dev,
8019                         "error obtaining controller properties\n");
8020                 return rc;
8021         }
8022
8023         rc = sis_get_pqi_capabilities(ctrl_info);
8024         if (rc) {
8025                 dev_err(&ctrl_info->pci_dev->dev,
8026                         "error obtaining controller capabilities\n");
8027                 return rc;
8028         }
8029
8030         product_id = sis_get_product_id(ctrl_info);
8031         ctrl_info->product_id = (u8)product_id;
8032         ctrl_info->product_revision = (u8)(product_id >> 8);
8033
8034         if (reset_devices) {
8035                 if (ctrl_info->max_outstanding_requests >
8036                         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8037                                 ctrl_info->max_outstanding_requests =
8038                                         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8039         } else {
8040                 if (ctrl_info->max_outstanding_requests >
8041                         PQI_MAX_OUTSTANDING_REQUESTS)
8042                                 ctrl_info->max_outstanding_requests =
8043                                         PQI_MAX_OUTSTANDING_REQUESTS;
8044         }
8045
8046         pqi_calculate_io_resources(ctrl_info);
8047
8048         rc = pqi_alloc_error_buffer(ctrl_info);
8049         if (rc) {
8050                 dev_err(&ctrl_info->pci_dev->dev,
8051                         "failed to allocate PQI error buffer\n");
8052                 return rc;
8053         }
8054
8055         /*
8056          * If the function we are about to call succeeds, the
8057          * controller will transition from legacy SIS mode
8058          * into PQI mode.
8059          */
8060         rc = sis_init_base_struct_addr(ctrl_info);
8061         if (rc) {
8062                 dev_err(&ctrl_info->pci_dev->dev,
8063                         "error initializing PQI mode\n");
8064                 return rc;
8065         }
8066
8067         /* Wait for the controller to complete the SIS -> PQI transition. */
8068         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8069         if (rc) {
8070                 dev_err(&ctrl_info->pci_dev->dev,
8071                         "transition to PQI mode failed\n");
8072                 return rc;
8073         }
8074
8075         /* From here on, we are running in PQI mode. */
8076         ctrl_info->pqi_mode_enabled = true;
8077         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8078
8079         rc = pqi_alloc_admin_queues(ctrl_info);
8080         if (rc) {
8081                 dev_err(&ctrl_info->pci_dev->dev,
8082                         "failed to allocate admin queues\n");
8083                 return rc;
8084         }
8085
8086         rc = pqi_create_admin_queues(ctrl_info);
8087         if (rc) {
8088                 dev_err(&ctrl_info->pci_dev->dev,
8089                         "error creating admin queues\n");
8090                 return rc;
8091         }
8092
8093         rc = pqi_report_device_capability(ctrl_info);
8094         if (rc) {
8095                 dev_err(&ctrl_info->pci_dev->dev,
8096                         "obtaining device capability failed\n");
8097                 return rc;
8098         }
8099
8100         rc = pqi_validate_device_capability(ctrl_info);
8101         if (rc)
8102                 return rc;
8103
8104         pqi_calculate_queue_resources(ctrl_info);
8105
8106         rc = pqi_enable_msix_interrupts(ctrl_info);
8107         if (rc)
8108                 return rc;
8109
8110         if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8111                 ctrl_info->max_msix_vectors =
8112                         ctrl_info->num_msix_vectors_enabled;
8113                 pqi_calculate_queue_resources(ctrl_info);
8114         }
8115
8116         rc = pqi_alloc_io_resources(ctrl_info);
8117         if (rc)
8118                 return rc;
8119
8120         rc = pqi_alloc_operational_queues(ctrl_info);
8121         if (rc) {
8122                 dev_err(&ctrl_info->pci_dev->dev,
8123                         "failed to allocate operational queues\n");
8124                 return rc;
8125         }
8126
8127         pqi_init_operational_queues(ctrl_info);
8128
8129         rc = pqi_create_queues(ctrl_info);
8130         if (rc)
8131                 return rc;
8132
8133         rc = pqi_request_irqs(ctrl_info);
8134         if (rc)
8135                 return rc;
8136
8137         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8138
8139         ctrl_info->controller_online = true;
8140
8141         rc = pqi_process_config_table(ctrl_info);
8142         if (rc)
8143                 return rc;
8144
8145         pqi_start_heartbeat_timer(ctrl_info);
8146
8147         if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8148                 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8149                 if (rc) { /* Supported features not returned correctly. */
8150                         dev_err(&ctrl_info->pci_dev->dev,
8151                                 "error obtaining advanced RAID bypass configuration\n");
8152                         return rc;
8153                 }
8154                 ctrl_info->ciss_report_log_flags |=
8155                         CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8156         }
8157
8158         rc = pqi_enable_events(ctrl_info);
8159         if (rc) {
8160                 dev_err(&ctrl_info->pci_dev->dev,
8161                         "error enabling events\n");
8162                 return rc;
8163         }
8164
8165         /* Register with the SCSI subsystem. */
8166         rc = pqi_register_scsi(ctrl_info);
8167         if (rc)
8168                 return rc;
8169
8170         rc = pqi_get_ctrl_product_details(ctrl_info);
8171         if (rc) {
8172                 dev_err(&ctrl_info->pci_dev->dev,
8173                         "error obtaining product details\n");
8174                 return rc;
8175         }
8176
8177         rc = pqi_get_ctrl_serial_number(ctrl_info);
8178         if (rc) {
8179                 dev_err(&ctrl_info->pci_dev->dev,
8180                         "error obtaining ctrl serial number\n");
8181                 return rc;
8182         }
8183
8184         rc = pqi_set_diag_rescan(ctrl_info);
8185         if (rc) {
8186                 dev_err(&ctrl_info->pci_dev->dev,
8187                         "error enabling multi-lun rescan\n");
8188                 return rc;
8189         }
8190
8191         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8192         if (rc) {
8193                 dev_err(&ctrl_info->pci_dev->dev,
8194                         "error updating host wellness\n");
8195                 return rc;
8196         }
8197
8198         pqi_schedule_update_time_worker(ctrl_info);
8199
8200         pqi_scan_scsi_devices(ctrl_info);
8201
8202         return 0;
8203 }
8204
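/*
 * Reset the host-side copies of the queue producer/consumer indexes and
 * zero the controller-visible index registers for the admin, operational,
 * and event queues so the existing queue memory can be reused.
 */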
8205 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8206 {
8207         unsigned int i;
8208         struct pqi_admin_queues *admin_queues;
8209         struct pqi_event_queue *event_queue;
8210
8211         admin_queues = &ctrl_info->admin_queues;
8212         admin_queues->iq_pi_copy = 0;
8213         admin_queues->oq_ci_copy = 0;
8214         writel(0, admin_queues->oq_pi);
8215
8216         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8217                 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8218                 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8219                 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8220
8221                 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8222                 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8223                 writel(0, ctrl_info->queue_groups[i].oq_pi);
8224         }
8225
8226         event_queue = &ctrl_info->event_queue;
8227         writel(0, event_queue->oq_pi);
8228         event_queue->oq_ci_copy = 0;
8229 }
8230
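/*
 * Re-initialization path used when the controller is restarted without
 * tearing down the driver's resources (e.g. resume or online firmware
 * activation): the queues and host buffers already exist, so they are
 * reinitialized rather than reallocated.
 */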
8231 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8232 {
8233         int rc;
8234
8235         rc = pqi_force_sis_mode(ctrl_info);
8236         if (rc)
8237                 return rc;
8238
8239         /*
8240          * Wait until the controller is ready to start accepting SIS
8241          * commands.
8242          */
8243         rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8244         if (rc)
8245                 return rc;
8246
8247         /*
8248          * Get the controller properties.  This allows us to determine
8249          * whether or not it supports PQI mode.
8250          */
8251         rc = sis_get_ctrl_properties(ctrl_info);
8252         if (rc) {
8253                 dev_err(&ctrl_info->pci_dev->dev,
8254                         "error obtaining controller properties\n");
8255                 return rc;
8256         }
8257
8258         rc = sis_get_pqi_capabilities(ctrl_info);
8259         if (rc) {
8260                 dev_err(&ctrl_info->pci_dev->dev,
8261                         "error obtaining controller capabilities\n");
8262                 return rc;
8263         }
8264
8265         /*
8266          * If the function we are about to call succeeds, the
8267          * controller will transition from legacy SIS mode
8268          * into PQI mode.
8269          */
8270         rc = sis_init_base_struct_addr(ctrl_info);
8271         if (rc) {
8272                 dev_err(&ctrl_info->pci_dev->dev,
8273                         "error initializing PQI mode\n");
8274                 return rc;
8275         }
8276
8277         /* Wait for the controller to complete the SIS -> PQI transition. */
8278         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8279         if (rc) {
8280                 dev_err(&ctrl_info->pci_dev->dev,
8281                         "transition to PQI mode failed\n");
8282                 return rc;
8283         }
8284
8285         /* From here on, we are running in PQI mode. */
8286         ctrl_info->pqi_mode_enabled = true;
8287         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8288
8289         pqi_reinit_queues(ctrl_info);
8290
8291         rc = pqi_create_admin_queues(ctrl_info);
8292         if (rc) {
8293                 dev_err(&ctrl_info->pci_dev->dev,
8294                         "error creating admin queues\n");
8295                 return rc;
8296         }
8297
8298         rc = pqi_create_queues(ctrl_info);
8299         if (rc)
8300                 return rc;
8301
8302         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8303
8304         ctrl_info->controller_online = true;
8305         pqi_ctrl_unblock_requests(ctrl_info);
8306
8307         pqi_ctrl_reset_config(ctrl_info);
8308
8309         rc = pqi_process_config_table(ctrl_info);
8310         if (rc)
8311                 return rc;
8312
8313         pqi_start_heartbeat_timer(ctrl_info);
8314
8315         if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8316                 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8317                 if (rc) {
8318                         dev_err(&ctrl_info->pci_dev->dev,
8319                                 "error obtaining advanced RAID bypass configuration\n");
8320                         return rc;
8321                 }
8322                 ctrl_info->ciss_report_log_flags |=
8323                         CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8324         }
8325
8326         rc = pqi_enable_events(ctrl_info);
8327         if (rc) {
8328                 dev_err(&ctrl_info->pci_dev->dev,
8329                         "error enabling events\n");
8330                 return rc;
8331         }
8332
8333         rc = pqi_get_ctrl_product_details(ctrl_info);
8334         if (rc) {
8335                 dev_err(&ctrl_info->pci_dev->dev,
8336                         "error obtaining product details\n");
8337                 return rc;
8338         }
8339
8340         rc = pqi_set_diag_rescan(ctrl_info);
8341         if (rc) {
8342                 dev_err(&ctrl_info->pci_dev->dev,
8343                         "error enabling multi-lun rescan\n");
8344                 return rc;
8345         }
8346
8347         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8348         if (rc) {
8349                 dev_err(&ctrl_info->pci_dev->dev,
8350                         "error updating host wellness\n");
8351                 return rc;
8352         }
8353
8354         if (pqi_ofa_in_progress(ctrl_info))
8355                 pqi_ctrl_unblock_scan(ctrl_info);
8356
8357         pqi_scan_scsi_devices(ctrl_info);
8358
8359         return 0;
8360 }
8361
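/*
 * Program the PCIe completion timeout range in the Device Control 2
 * register. The timeout argument is one of the encoded ranges covered
 * by the PCI_EXP_DEVCTL2_COMP_TIMEOUT field.
 */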
8362 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8363 {
8364         int rc;
8365
8366         rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8367                 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8368
8369         return pcibios_err_to_errno(rc);
8370 }
8371
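/*
 * PCI-level initialization: enable the device, set the DMA mask, claim
 * the BAR 0 region, map the controller registers, raise the PCIe
 * completion timeout, and enable bus mastering.
 */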
8372 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8373 {
8374         int rc;
8375         u64 mask;
8376
8377         rc = pci_enable_device(ctrl_info->pci_dev);
8378         if (rc) {
8379                 dev_err(&ctrl_info->pci_dev->dev,
8380                         "failed to enable PCI device\n");
8381                 return rc;
8382         }
8383
8384         if (sizeof(dma_addr_t) > 4)
8385                 mask = DMA_BIT_MASK(64);
8386         else
8387                 mask = DMA_BIT_MASK(32);
8388
8389         rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8390         if (rc) {
8391                 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8392                 goto disable_device;
8393         }
8394
8395         rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8396         if (rc) {
8397                 dev_err(&ctrl_info->pci_dev->dev,
8398                         "failed to obtain PCI resources\n");
8399                 goto disable_device;
8400         }
8401
8402         ctrl_info->iomem_base = ioremap(pci_resource_start(
8403                 ctrl_info->pci_dev, 0),
8404                 sizeof(struct pqi_ctrl_registers));
8405         if (!ctrl_info->iomem_base) {
8406                 dev_err(&ctrl_info->pci_dev->dev,
8407                         "failed to map memory for controller registers\n");
8408                 rc = -ENOMEM;
8409                 goto release_regions;
8410         }
8411
8412 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS               0x6
8413
8414         /* Increase the PCIe completion timeout. */
8415         rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8416                 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8417         if (rc) {
8418                 dev_err(&ctrl_info->pci_dev->dev,
8419                         "failed to set PCIe completion timeout\n");
8420                 goto release_regions;
8421         }
8422
8423         /* Enable bus mastering. */
8424         pci_set_master(ctrl_info->pci_dev);
8425
8426         ctrl_info->registers = ctrl_info->iomem_base;
8427         ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8428
8429         pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8430
8431         return 0;
8432
8433 release_regions:
8434         pci_release_regions(ctrl_info->pci_dev);
8435 disable_device:
8436         pci_disable_device(ctrl_info->pci_dev);
8437
8438         return rc;
8439 }
8440
8441 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8442 {
8443         iounmap(ctrl_info->iomem_base);
8444         pci_release_regions(ctrl_info->pci_dev);
8445         if (pci_is_enabled(ctrl_info->pci_dev))
8446                 pci_disable_device(ctrl_info->pci_dev);
8447         pci_set_drvdata(ctrl_info->pci_dev, NULL);
8448 }
8449
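/*
 * Allocate and initialize the per-controller context on the requested
 * NUMA node: locks, work items, timers, wait queues, and the default
 * transfer and RAID-write limits.
 */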
8450 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8451 {
8452         struct pqi_ctrl_info *ctrl_info;
8453
8454         ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8455                         GFP_KERNEL, numa_node);
8456         if (!ctrl_info)
8457                 return NULL;
8458
8459         mutex_init(&ctrl_info->scan_mutex);
8460         mutex_init(&ctrl_info->lun_reset_mutex);
8461         mutex_init(&ctrl_info->ofa_mutex);
8462
8463         INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8464         spin_lock_init(&ctrl_info->scsi_device_list_lock);
8465
8466         INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8467         atomic_set(&ctrl_info->num_interrupts, 0);
8468
8469         INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8470         INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8471
8472         timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8473         INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8474
8475         INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8476         INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8477
8478         sema_init(&ctrl_info->sync_request_sem,
8479                 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8480         init_waitqueue_head(&ctrl_info->block_requests_wait);
8481
8482         ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8483         ctrl_info->irq_mode = IRQ_MODE_NONE;
8484         ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8485
8486         ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8487         ctrl_info->max_transfer_encrypted_sas_sata =
8488                 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8489         ctrl_info->max_transfer_encrypted_nvme =
8490                 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8491         ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8492         ctrl_info->max_write_raid_1_10_2drive = ~0;
8493         ctrl_info->max_write_raid_1_10_3drive = ~0;
8494
8495         return ctrl_info;
8496 }
8497
8498 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8499 {
8500         kfree(ctrl_info);
8501 }
8502
8503 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8504 {
8505         pqi_free_irqs(ctrl_info);
8506         pqi_disable_msix_interrupts(ctrl_info);
8507 }
8508
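/*
 * Release everything set up during controller initialization: the
 * heartbeat timer, interrupts, queue and error-buffer DMA memory, the
 * PCI mapping, and finally the controller context itself.
 */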
8509 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8510 {
8511         pqi_stop_heartbeat_timer(ctrl_info);
8512         pqi_free_interrupts(ctrl_info);
8513         if (ctrl_info->queue_memory_base)
8514                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8515                         ctrl_info->queue_memory_length,
8516                         ctrl_info->queue_memory_base,
8517                         ctrl_info->queue_memory_base_dma_handle);
8518         if (ctrl_info->admin_queue_memory_base)
8519                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8520                         ctrl_info->admin_queue_memory_length,
8521                         ctrl_info->admin_queue_memory_base,
8522                         ctrl_info->admin_queue_memory_base_dma_handle);
8523         pqi_free_all_io_requests(ctrl_info);
8524         if (ctrl_info->error_buffer)
8525                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8526                         ctrl_info->error_buffer_length,
8527                         ctrl_info->error_buffer,
8528                         ctrl_info->error_buffer_dma_handle);
8529         if (ctrl_info->iomem_base)
8530                 pqi_cleanup_pci_init(ctrl_info);
8531         pqi_free_ctrl_info(ctrl_info);
8532 }
8533
8534 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8535 {
8536         pqi_cancel_rescan_worker(ctrl_info);
8537         pqi_cancel_update_time_worker(ctrl_info);
8538         pqi_remove_all_scsi_devices(ctrl_info);
8539         pqi_unregister_scsi(ctrl_info);
8540         if (ctrl_info->pqi_mode_enabled)
8541                 pqi_revert_to_sis_mode(ctrl_info);
8542         pqi_free_ctrl_resources(ctrl_info);
8543 }
8544
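/*
 * Quiesce the controller for Online Firmware Activation (OFA): block
 * scans, SCSI requests, and device resets, wait for outstanding I/O to
 * drain, and stop the heartbeat timer. pqi_ofa_ctrl_unquiesce() undoes
 * these steps in the reverse order.
 */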
8545 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8546 {
8547         pqi_ctrl_block_scan(ctrl_info);
8548         pqi_scsi_block_requests(ctrl_info);
8549         pqi_ctrl_block_device_reset(ctrl_info);
8550         pqi_ctrl_block_requests(ctrl_info);
8551         pqi_ctrl_wait_until_quiesced(ctrl_info);
8552         pqi_stop_heartbeat_timer(ctrl_info);
8553 }
8554
8555 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8556 {
8557         pqi_start_heartbeat_timer(ctrl_info);
8558         pqi_ctrl_unblock_requests(ctrl_info);
8559         pqi_ctrl_unblock_device_reset(ctrl_info);
8560         pqi_scsi_unblock_requests(ctrl_info);
8561         pqi_ctrl_unblock_scan(ctrl_info);
8562 }
8563
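/*
 * Allocate the OFA host buffer as DMA-coherent chunks of chunk_size
 * bytes and record each chunk in the SG descriptor list embedded in
 * struct pqi_ofa_memory. Fails with -ENOMEM if the descriptor count is
 * out of range or any chunk allocation fails.
 */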
8564 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
8565 {
8566         int i;
8567         u32 sg_count;
8568         struct device *dev;
8569         struct pqi_ofa_memory *ofap;
8570         struct pqi_sg_descriptor *mem_descriptor;
8571         dma_addr_t dma_handle;
8572
8573         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8574
8575         sg_count = DIV_ROUND_UP(total_size, chunk_size);
8576         if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
8577                 goto out;
8578
8579         ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8580         if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8581                 goto out;
8582
8583         dev = &ctrl_info->pci_dev->dev;
8584
8585         for (i = 0; i < sg_count; i++) {
8586                 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
8587                         dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8588                 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
8589                         goto out_free_chunks;
8590                 mem_descriptor = &ofap->sg_descriptor[i];
8591                 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8592                 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8593         }
8594
8595         put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8596         put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
8597         put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8598
8599         return 0;
8600
8601 out_free_chunks:
8602         while (--i >= 0) {
8603                 mem_descriptor = &ofap->sg_descriptor[i];
8604                 dma_free_coherent(dev, chunk_size,
8605                         ctrl_info->pqi_ofa_chunk_virt_addr[i],
8606                         get_unaligned_le64(&mem_descriptor->address));
8607         }
8608         kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8609
8610 out:
8611         return -ENOMEM;
8612 }
8613
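/*
 * Size the OFA host buffer requested by the controller: start with a
 * single chunk covering the whole request and keep halving the
 * (page-aligned) chunk size until the allocation succeeds or the chunk
 * size drops below the minimum that still fits the SG descriptor limit.
 */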
8614 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8615 {
8616         u32 total_size;
8617         u32 chunk_size;
8618         u32 min_chunk_size;
8619
8620         if (ctrl_info->ofa_bytes_requested == 0)
8621                 return 0;
8622
8623         total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8624         min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8625         min_chunk_size = PAGE_ALIGN(min_chunk_size);
8626
8627         for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8628                 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
8629                         return 0;
8630                 chunk_size /= 2;
8631                 chunk_size = PAGE_ALIGN(chunk_size);
8632         }
8633
8634         return -ENOMEM;
8635 }
8636
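/*
 * Allocate the OFA memory descriptor, populate it with the host buffer
 * chunks, and stamp it with the OFA version and signature expected by
 * the controller.
 */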
8637 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
8638 {
8639         struct device *dev;
8640         struct pqi_ofa_memory *ofap;
8641
8642         dev = &ctrl_info->pci_dev->dev;
8643
8644         ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8645                 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8646         if (!ofap)
8647                 return;
8648
8649         ctrl_info->pqi_ofa_mem_virt_addr = ofap;
8650
8651         if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8652                 dev_err(dev,
8653                         "failed to allocate host buffer for Online Firmware Activation\n");
8654                 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8655                 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8656                 return;
8657         }
8658
8659         put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8660         memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
8661 }
8662
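/*
 * Free the OFA host buffer chunks listed in the memory descriptor and
 * then the descriptor structure itself.
 */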
8663 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8664 {
8665         unsigned int i;
8666         struct device *dev;
8667         struct pqi_ofa_memory *ofap;
8668         struct pqi_sg_descriptor *mem_descriptor;
8669         unsigned int num_memory_descriptors;
8670
8671         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8672         if (!ofap)
8673                 return;
8674
8675         dev = &ctrl_info->pci_dev->dev;
8676
8677         if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8678                 goto out;
8679
8680         mem_descriptor = ofap->sg_descriptor;
8681         num_memory_descriptors =
8682                 get_unaligned_le16(&ofap->num_memory_descriptors);
8683
8684         for (i = 0; i < num_memory_descriptors; i++) {
8685                 dma_free_coherent(dev,
8686                         get_unaligned_le32(&mem_descriptor[i].length),
8687                         ctrl_info->pqi_ofa_chunk_virt_addr[i],
8688                         get_unaligned_le64(&mem_descriptor[i].address));
8689         }
8690         kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8691
8692 out:
8693         dma_free_coherent(dev, sizeof(*ofap), ofap,
8694                 ctrl_info->pqi_ofa_mem_dma_handle);
8695         ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8696 }
8697
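/*
 * Send a vendor-general HOST_MEMORY_UPDATE request to the controller
 * describing the OFA host buffer, or a zeroed descriptor if no buffer
 * could be allocated.
 */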
8698 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8699 {
8700         u32 buffer_length;
8701         struct pqi_vendor_general_request request;
8702         struct pqi_ofa_memory *ofap;
8703
8704         memset(&request, 0, sizeof(request));
8705
8706         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8707         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8708                 &request.header.iu_length);
8709         put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8710                 &request.function_code);
8711
8712         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8713
8714         if (ofap) {
8715                 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8716                         get_unaligned_le16(&ofap->num_memory_descriptors) *
8717                         sizeof(struct pqi_sg_descriptor);
8718
8719                 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8720                         &request.data.ofa_memory_allocation.buffer_address);
8721                 put_unaligned_le32(buffer_length,
8722                         &request.data.ofa_memory_allocation.buffer_length);
8723         }
8724
8725         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8726 }
8727
8728 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8729 {
8730         ssleep(delay_secs);
8731
8732         return pqi_ctrl_init_resume(ctrl_info);
8733 }
8734
8735 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8736         .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8737         .status = SAM_STAT_CHECK_CONDITION,
8738 };
8739
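/*
 * Complete every in-flight request with an error once the controller is
 * offline: SML commands are failed with DID_NO_CONNECT (or simply freed
 * if their device is already gone), while driver-initiated requests
 * complete with -ENXIO and a synthetic check-condition error record.
 */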
8740 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8741 {
8742         unsigned int i;
8743         struct pqi_io_request *io_request;
8744         struct scsi_cmnd *scmd;
8745         struct scsi_device *sdev;
8746
8747         for (i = 0; i < ctrl_info->max_io_slots; i++) {
8748                 io_request = &ctrl_info->io_request_pool[i];
8749                 if (atomic_read(&io_request->refcount) == 0)
8750                         continue;
8751
8752                 scmd = io_request->scmd;
8753                 if (scmd) {
8754                         sdev = scmd->device;
8755                         if (!sdev || !scsi_device_online(sdev)) {
8756                                 pqi_free_io_request(io_request);
8757                                 continue;
8758                         } else {
8759                                 set_host_byte(scmd, DID_NO_CONNECT);
8760                         }
8761                 } else {
8762                         io_request->status = -ENXIO;
8763                         io_request->error_info =
8764                                 &pqi_ctrl_offline_raid_error_info;
8765                 }
8766
8767                 io_request->io_complete_callback(io_request,
8768                         io_request->context);
8769         }
8770 }
8771
8772 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
8773 {
8774         pqi_perform_lockup_action();
8775         pqi_stop_heartbeat_timer(ctrl_info);
8776         pqi_free_interrupts(ctrl_info);
8777         pqi_cancel_rescan_worker(ctrl_info);
8778         pqi_cancel_update_time_worker(ctrl_info);
8779         pqi_ctrl_wait_until_quiesced(ctrl_info);
8780         pqi_fail_all_outstanding_requests(ctrl_info);
8781         pqi_ctrl_unblock_requests(ctrl_info);
8782 }
8783
8784 static void pqi_ctrl_offline_worker(struct work_struct *work)
8785 {
8786         struct pqi_ctrl_info *ctrl_info;
8787
8788         ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8789         pqi_take_ctrl_offline_deferred(ctrl_info);
8790 }
8791
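/*
 * Mark the controller offline, issue a SIS controller shutdown with the
 * given reason unless pqi_disable_ctrl_shutdown is set, and defer the
 * remaining cleanup (failing outstanding requests, freeing interrupts)
 * to the ctrl_offline_work item.
 */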
8792 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
8793         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
8794 {
8795         if (!ctrl_info->controller_online)
8796                 return;
8797
8798         ctrl_info->controller_online = false;
8799         ctrl_info->pqi_mode_enabled = false;
8800         pqi_ctrl_block_requests(ctrl_info);
8801         if (!pqi_disable_ctrl_shutdown)
8802                 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
8803         pci_disable_device(ctrl_info->pci_dev);
8804         dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
8805         schedule_work(&ctrl_info->ctrl_offline_work);
8806 }
8807
8808 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
8809         const struct pci_device_id *id)
8810 {
8811         char *ctrl_description;
8812
8813         if (id->driver_data)
8814                 ctrl_description = (char *)id->driver_data;
8815         else
8816                 ctrl_description = "Microchip Smart Family Controller";
8817
8818         dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
8819 }
8820
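/*
 * PCI probe entry point: honor the device ID wildcard module parameter,
 * pick a NUMA node for allocations, then run the PCI- and
 * controller-level initialization paths.
 */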
8821 static int pqi_pci_probe(struct pci_dev *pci_dev,
8822         const struct pci_device_id *id)
8823 {
8824         int rc;
8825         int node;
8826         struct pqi_ctrl_info *ctrl_info;
8827
8828         pqi_print_ctrl_info(pci_dev, id);
8829
8830         if (pqi_disable_device_id_wildcards &&
8831                 id->subvendor == PCI_ANY_ID &&
8832                 id->subdevice == PCI_ANY_ID) {
8833                 dev_warn(&pci_dev->dev,
8834                         "controller not probed because device ID wildcards are disabled\n");
8835                 return -ENODEV;
8836         }
8837
8838         if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
8839                 dev_warn(&pci_dev->dev,
8840                         "controller device ID matched using wildcards\n");
8841
8842         node = dev_to_node(&pci_dev->dev);
8843         if (node == NUMA_NO_NODE) {
8844                 node = cpu_to_node(0);
8845                 if (node == NUMA_NO_NODE)
8846                         node = 0;
8847                 set_dev_node(&pci_dev->dev, node);
8848         }
8849
8850         ctrl_info = pqi_alloc_ctrl_info(node);
8851         if (!ctrl_info) {
8852                 dev_err(&pci_dev->dev,
8853                         "failed to allocate controller info block\n");
8854                 return -ENOMEM;
8855         }
8856
8857         ctrl_info->pci_dev = pci_dev;
8858
8859         rc = pqi_pci_init(ctrl_info);
8860         if (rc)
8861                 goto error;
8862
8863         rc = pqi_ctrl_init(ctrl_info);
8864         if (rc)
8865                 goto error;
8866
8867         return 0;
8868
8869 error:
8870         pqi_remove_ctrl(ctrl_info);
8871
8872         return rc;
8873 }
8874
8875 static void pqi_pci_remove(struct pci_dev *pci_dev)
8876 {
8877         struct pqi_ctrl_info *ctrl_info;
8878
8879         ctrl_info = pci_get_drvdata(pci_dev);
8880         if (!ctrl_info)
8881                 return;
8882
8883         pqi_remove_ctrl(ctrl_info);
8884 }
8885
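/*
 * Warn for every request still outstanding at shutdown (which escalates
 * to a crash when panic_on_warn is set); the two WARN_ONs distinguish
 * SCSI midlayer I/O from driver-initiated requests.
 */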
8886 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8887 {
8888         unsigned int i;
8889         struct pqi_io_request *io_request;
8890         struct scsi_cmnd *scmd;
8891
8892         for (i = 0; i < ctrl_info->max_io_slots; i++) {
8893                 io_request = &ctrl_info->io_request_pool[i];
8894                 if (atomic_read(&io_request->refcount) == 0)
8895                         continue;
8896                 scmd = io_request->scmd;
8897                 WARN_ON(scmd != NULL); /* IO command from SML */
8898                 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
8899         }
8900 }
8901
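/*
 * PCI shutdown handler: wait for any OFA in progress to finish, quiesce
 * the controller, flush the battery-backed cache with a RESTART or
 * SHUTDOWN event, and reset the controller.
 */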
8902 static void pqi_shutdown(struct pci_dev *pci_dev)
8903 {
8904         int rc;
8905         struct pqi_ctrl_info *ctrl_info;
8906         enum bmic_flush_cache_shutdown_event shutdown_event;
8907
8908         ctrl_info = pci_get_drvdata(pci_dev);
8909         if (!ctrl_info) {
8910                 dev_err(&pci_dev->dev,
8911                         "cache could not be flushed\n");
8912                 return;
8913         }
8914
8915         pqi_wait_until_ofa_finished(ctrl_info);
8916
8917         pqi_scsi_block_requests(ctrl_info);
8918         pqi_ctrl_block_device_reset(ctrl_info);
8919         pqi_ctrl_block_requests(ctrl_info);
8920         pqi_ctrl_wait_until_quiesced(ctrl_info);
8921
8922         if (system_state == SYSTEM_RESTART)
8923                 shutdown_event = RESTART;
8924         else
8925                 shutdown_event = SHUTDOWN;
8926
8927         /*
8928          * Write all data in the controller's battery-backed cache to
8929          * storage.
8930          */
8931         rc = pqi_flush_cache(ctrl_info, shutdown_event);
8932         if (rc)
8933                 dev_err(&pci_dev->dev,
8934                         "unable to flush controller cache\n");
8935
8936         pqi_crash_if_pending_command(ctrl_info);
8937         pqi_reset(ctrl_info);
8938 }
8939
8940 static void pqi_process_lockup_action_param(void)
8941 {
8942         unsigned int i;
8943
8944         if (!pqi_lockup_action_param)
8945                 return;
8946
8947         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
8948                 if (strcmp(pqi_lockup_action_param,
8949                         pqi_lockup_actions[i].name) == 0) {
8950                         pqi_lockup_action = pqi_lockup_actions[i].action;
8951                         return;
8952                 }
8953         }
8954
8955         pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
8956                 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8957 }
8958
8959 static void pqi_process_module_params(void)
8960 {
8961         pqi_process_lockup_action_param();
8962 }
8963
8964 #if defined(CONFIG_PM)
8965
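/*
 * Select the flush-cache event used on suspend: controllers with the
 * ADAPTEC2/0x1304 subsystem ID use RESTART, all others use SUSPEND.
 */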
8966 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
8967 {
8968         if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
8969                 return RESTART;
8970
8971         return SUSPEND;
8972 }
8973
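/*
 * Common suspend/freeze path: quiesce the controller, flush the cache
 * only for a true suspend, then release interrupts and clear the
 * online/PQI-mode flags so that resume reinitializes the controller.
 */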
8974 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
8975 {
8976         struct pci_dev *pci_dev;
8977         struct pqi_ctrl_info *ctrl_info;
8978
8979         pci_dev = to_pci_dev(dev);
8980         ctrl_info = pci_get_drvdata(pci_dev);
8981
8982         pqi_wait_until_ofa_finished(ctrl_info);
8983
8984         pqi_ctrl_block_scan(ctrl_info);
8985         pqi_scsi_block_requests(ctrl_info);
8986         pqi_ctrl_block_device_reset(ctrl_info);
8987         pqi_ctrl_block_requests(ctrl_info);
8988         pqi_ctrl_wait_until_quiesced(ctrl_info);
8989
8990         if (suspend) {
8991                 enum bmic_flush_cache_shutdown_event shutdown_event;
8992
8993                 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
8994                 pqi_flush_cache(ctrl_info, shutdown_event);
8995         }
8996
8997         pqi_stop_heartbeat_timer(ctrl_info);
8998         pqi_crash_if_pending_command(ctrl_info);
8999         pqi_free_irqs(ctrl_info);
9000
9001         ctrl_info->controller_online = false;
9002         ctrl_info->pqi_mode_enabled = false;
9003
9004         return 0;
9005 }
9006
9007 static __maybe_unused int pqi_suspend(struct device *dev)
9008 {
9009         return pqi_suspend_or_freeze(dev, true);
9010 }
9011
9012 static int pqi_resume_or_restore(struct device *dev)
9013 {
9014         int rc;
9015         struct pci_dev *pci_dev;
9016         struct pqi_ctrl_info *ctrl_info;
9017
9018         pci_dev = to_pci_dev(dev);
9019         ctrl_info = pci_get_drvdata(pci_dev);
9020
9021         rc = pqi_request_irqs(ctrl_info);
9022         if (rc)
9023                 return rc;
9024
9025         pqi_ctrl_unblock_device_reset(ctrl_info);
9026         pqi_ctrl_unblock_requests(ctrl_info);
9027         pqi_scsi_unblock_requests(ctrl_info);
9028         pqi_ctrl_unblock_scan(ctrl_info);
9029
9030         ssleep(PQI_POST_RESET_DELAY_SECS);
9031
9032         return pqi_ctrl_init_resume(ctrl_info);
9033 }
9034
9035 static int pqi_freeze(struct device *dev)
9036 {
9037         return pqi_suspend_or_freeze(dev, false);
9038 }
9039
9040 static int pqi_thaw(struct device *dev)
9041 {
9042         int rc;
9043         struct pci_dev *pci_dev;
9044         struct pqi_ctrl_info *ctrl_info;
9045
9046         pci_dev = to_pci_dev(dev);
9047         ctrl_info = pci_get_drvdata(pci_dev);
9048
9049         rc = pqi_request_irqs(ctrl_info);
9050         if (rc)
9051                 return rc;
9052
9053         ctrl_info->controller_online = true;
9054         ctrl_info->pqi_mode_enabled = true;
9055
9056         pqi_ctrl_unblock_device_reset(ctrl_info);
9057         pqi_ctrl_unblock_requests(ctrl_info);
9058         pqi_scsi_unblock_requests(ctrl_info);
9059         pqi_ctrl_unblock_scan(ctrl_info);
9060
9061         return 0;
9062 }
9063
9064 static int pqi_poweroff(struct device *dev)
9065 {
9066         struct pci_dev *pci_dev;
9067         struct pqi_ctrl_info *ctrl_info;
9068         enum bmic_flush_cache_shutdown_event shutdown_event;
9069
9070         pci_dev = to_pci_dev(dev);
9071         ctrl_info = pci_get_drvdata(pci_dev);
9072
9073         shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9074         pqi_flush_cache(ctrl_info, shutdown_event);
9075
9076         return 0;
9077 }
9078
9079 static const struct dev_pm_ops pqi_pm_ops = {
9080         .suspend = pqi_suspend,
9081         .resume = pqi_resume_or_restore,
9082         .freeze = pqi_freeze,
9083         .thaw = pqi_thaw,
9084         .poweroff = pqi_poweroff,
9085         .restore = pqi_resume_or_restore,
9086 };
9087
9088 #endif /* CONFIG_PM */
9089
9090 /* Define the PCI IDs for the controllers that we support. */
9091 static const struct pci_device_id pqi_pci_id_table[] = {
9092         {
9093                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9094                                0x105b, 0x1211)
9095         },
9096         {
9097                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9098                                0x105b, 0x1321)
9099         },
9100         {
9101                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9102                                0x152d, 0x8a22)
9103         },
9104         {
9105                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9106                                0x152d, 0x8a23)
9107         },
9108         {
9109                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9110                                0x152d, 0x8a24)
9111         },
9112         {
9113                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9114                                0x152d, 0x8a36)
9115         },
9116         {
9117                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9118                                0x152d, 0x8a37)
9119         },
9120         {
9121                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9122                                0x193d, 0x1104)
9123         },
9124         {
9125                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9126                                0x193d, 0x1105)
9127         },
9128         {
9129                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9130                                0x193d, 0x1106)
9131         },
9132         {
9133                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9134                                0x193d, 0x1107)
9135         },
9136         {
9137                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9138                                0x193d, 0x1108)
9139         },
9140         {
9141                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9142                                0x193d, 0x1109)
9143         },
9144         {
9145                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9146                                0x193d, 0x8460)
9147         },
9148         {
9149                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9150                                0x193d, 0x8461)
9151         },
9152         {
9153                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9154                                0x193d, 0xc460)
9155         },
9156         {
9157                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9158                                0x193d, 0xc461)
9159         },
9160         {
9161                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9162                                0x193d, 0xf460)
9163         },
9164         {
9165                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9166                                0x193d, 0xf461)
9167         },
9168         {
9169                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9170                                0x1bd4, 0x0045)
9171         },
9172         {
9173                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9174                                0x1bd4, 0x0046)
9175         },
9176         {
9177                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9178                                0x1bd4, 0x0047)
9179         },
9180         {
9181                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9182                                0x1bd4, 0x0048)
9183         },
9184         {
9185                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9186                                0x1bd4, 0x004a)
9187         },
9188         {
9189                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9190                                0x1bd4, 0x004b)
9191         },
9192         {
9193                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9194                                0x1bd4, 0x004c)
9195         },
9196         {
9197                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9198                                0x1bd4, 0x004f)
9199         },
9200         {
9201                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9202                                0x1bd4, 0x0051)
9203         },
9204         {
9205                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9206                                0x1bd4, 0x0052)
9207         },
9208         {
9209                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9210                                0x1bd4, 0x0053)
9211         },
9212         {
9213                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9214                                0x1bd4, 0x0054)
9215         },
9216         {
9217                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9218                                0x1bd4, 0x006b)
9219         },
9220         {
9221                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9222                                0x1bd4, 0x006c)
9223         },
9224         {
9225                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9226                                0x1bd4, 0x006d)
9227         },
9228         {
9229                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9230                                0x1bd4, 0x006f)
9231         },
9232         {
9233                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9234                                0x1bd4, 0x0070)
9235         },
9236         {
9237                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9238                                0x1bd4, 0x0071)
9239         },
9240         {
9241                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9242                                0x1bd4, 0x0072)
9243         },
9244         {
9245                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9246                                0x19e5, 0xd227)
9247         },
9248         {
9249                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9250                                0x19e5, 0xd228)
9251         },
9252         {
9253                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9254                                0x19e5, 0xd229)
9255         },
9256         {
9257                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9258                                0x19e5, 0xd22a)
9259         },
9260         {
9261                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9262                                0x19e5, 0xd22b)
9263         },
9264         {
9265                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9266                                0x19e5, 0xd22c)
9267         },
9268         {
9269                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9270                                PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9271         },
9272         {
9273                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9274                                PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9275         },
9276         {
9277                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9278                                PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9279         },
9280         {
9281                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9282                                PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9283         },
9284         {
9285                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9286                                PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9287         },
9288         {
9289                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9290                                PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9291         },
9292         {
9293                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9294                                PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9295         },
9296         {
9297                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9298                                PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9299         },
9300         {
9301                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9302                                PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9303         },
9304         {
9305                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9306                                PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9307         },
9308         {
9309                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9310                                PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9311         },
9312         {
9313                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9314                                PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9315         },
9316         {
9317                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9318                                PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9319         },
9320         {
9321                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9322                                PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9323         },
9324         {
9325                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9326                                PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9327         },
9328         {
9329                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9330                                PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9331         },
9332         {
9333                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9334                                PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9335         },
9336         {
9337                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9338                                PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9339         },
9340         {
9341                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9342                                PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9343         },
9344         {
9345                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9346                                PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9347         },
9348         {
9349                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9350                                PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9351         },
9352         {
9353                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9354                                PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9355         },
9356         {
9357                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9358                                PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9359         },
9360         {
9361                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9362                                PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9363         },
9364         {
9365                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9366                                PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9367         },
9368         {
9369                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9370                                PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9371         },
9372         {
9373                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9374                                PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9375         },
9376         {
9377                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9378                                PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9379         },
9380         {
9381                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9382                                PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9383         },
9384         {
9385                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9386                                PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9387         },
9388         {
9389                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9390                                PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9391         },
9392         {
9393                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9394                                PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9395         },
9396         {
9397                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9398                                PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9399         },
9400         {
9401                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9402                                PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9403         },
9404         {
9405                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9406                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9407         },
9408         {
9409                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9410                                PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9411         },
9412         {
9413                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9414                                PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9415         },
9416         {
9417                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9418                                PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9419         },
9420         {
9421                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9422                                PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9423         },
9424         {
9425                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9426                                PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9427         },
9428         {
9429                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9430                                PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9431         },
9432         {
9433                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9434                                PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9435         },
9436         {
9437                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9438                                PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9439         },
9440         {
9441                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9442                                PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9443         },
9444         {
9445                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9446                                PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9447         },
9448         {
9449                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9450                                PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9451         },
9452         {
9453                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9454                                PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9455         },
9456         {
9457                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9458                                PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9459         },
9460         {
9461                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9462                                PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9463         },
9464         {
9465                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9466                                PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9467         },
9468         {
9469                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9470                                PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9471         },
9472         {
9473                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9474                                PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9475         },
9476         {
9477                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9478                                PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9479         },
9480         {
9481                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9482                                PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9483         },
9484         {
9485                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9486                                PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9487         },
9488         {
9489                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9490                                PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9491         },
9492         {
9493                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9494                                PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9495         },
9496         {
9497                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9498                                PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9499         },
9500         {
9501                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9502                                PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9503         },
9504         {
9505                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9506                                PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9507         },
9508         {
9509                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9510                                PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9511         },
9512         {
9513                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9514                                PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9515         },
9516         {
9517                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9518                                PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
9519         },
9520         {
9521                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9522                                PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
9523         },
9524         {
9525                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9526                                PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9527         },
9528         {
9529                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9530                                PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9531         },
9532         {
9533                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9534                                PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9535         },
9536         {
9537                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9538                                PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9539         },
9540         {
9541                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9542                                PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
9543         },
9544         {
9545                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9546                                PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9547         },
9548         {
9549                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9550                                PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9551         },
9552         {
9553                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9554                                PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9555         },
9556         {
9557                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9558                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
9559         },
9560         {
9561                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9562                                PCI_VENDOR_ID_DELL, 0x1fe0)
9563         },
9564         {
9565                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9566                                PCI_VENDOR_ID_HP, 0x0600)
9567         },
9568         {
9569                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9570                                PCI_VENDOR_ID_HP, 0x0601)
9571         },
9572         {
9573                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9574                                PCI_VENDOR_ID_HP, 0x0602)
9575         },
9576         {
9577                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9578                                PCI_VENDOR_ID_HP, 0x0603)
9579         },
9580         {
9581                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9582                                PCI_VENDOR_ID_HP, 0x0609)
9583         },
9584         {
9585                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9586                                PCI_VENDOR_ID_HP, 0x0650)
9587         },
9588         {
9589                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9590                                PCI_VENDOR_ID_HP, 0x0651)
9591         },
9592         {
9593                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9594                                PCI_VENDOR_ID_HP, 0x0652)
9595         },
9596         {
9597                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9598                                PCI_VENDOR_ID_HP, 0x0653)
9599         },
9600         {
9601                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9602                                PCI_VENDOR_ID_HP, 0x0654)
9603         },
9604         {
9605                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9606                                PCI_VENDOR_ID_HP, 0x0655)
9607         },
9608         {
9609                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9610                                PCI_VENDOR_ID_HP, 0x0700)
9611         },
9612         {
9613                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9614                                PCI_VENDOR_ID_HP, 0x0701)
9615         },
9616         {
9617                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9618                                PCI_VENDOR_ID_HP, 0x1001)
9619         },
9620         {
9621                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9622                                PCI_VENDOR_ID_HP, 0x1002)
9623         },
9624         {
9625                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9626                                PCI_VENDOR_ID_HP, 0x1100)
9627         },
9628         {
9629                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9630                                PCI_VENDOR_ID_HP, 0x1101)
9631         },
9632         {
9633                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9634                                0x1590, 0x0294)
9635         },
9636         {
9637                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9638                                0x1590, 0x02db)
9639         },
9640         {
9641                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9642                                0x1590, 0x02dc)
9643         },
9644         {
9645                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9646                                0x1590, 0x032e)
9647         },
9648         {
9649                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9650                                0x1590, 0x036f)
9651         },
9652         {
9653                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9654                                0x1590, 0x0381)
9655         },
9656         {
9657                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9658                                0x1590, 0x0382)
9659         },
9660         {
9661                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9662                                0x1590, 0x0383)
9663         },
9664         {
9665                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9666                                0x1d8d, 0x0800)
9667         },
9668         {
9669                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9670                                0x1d8d, 0x0908)
9671         },
9672         {
9673                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9674                                0x1d8d, 0x0806)
9675         },
9676         {
9677                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9678                                0x1d8d, 0x0916)
9679         },
9680         {
9681                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9682                                PCI_VENDOR_ID_GIGABYTE, 0x1000)
9683         },
9684         {
9685                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9686                                0x1dfc, 0x3161)
9687         },
9688         {
9689                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9690                                0x1f0c, 0x3161)
9691         },
9692         {
9693                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9694                                0x1cf2, 0x5445)
9695         },
9696         {
9697                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9698                                0x1cf2, 0x5446)
9699         },
9700         {
9701                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9702                                0x1cf2, 0x5447)
9703         },
9704         {
9705                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9706                                0x1cf2, 0x5449)
9707         },
9708         {
9709                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9710                                0x1cf2, 0x544a)
9711         },
9712         {
9713                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9714                                0x1cf2, 0x544b)
9715         },
9716         {
9717                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9718                                0x1cf2, 0x544d)
9719         },
9720         {
9721                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9722                                0x1cf2, 0x544e)
9723         },
9724         {
9725                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9726                                0x1cf2, 0x544f)
9727         },
9728         {
9729                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9730                                0x1cf2, 0x0b27)
9731         },
9732         {
9733                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9734                                0x1cf2, 0x0b29)
9735         },
9736         {
9737                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9738                                0x1cf2, 0x0b45)
9739         },
9740         {
9741                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9742                                PCI_ANY_ID, PCI_ANY_ID)
9743         },
9744         { 0 }
9745 };
9746
9747 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
9748
9749 static struct pci_driver pqi_pci_driver = {
9750         .name = DRIVER_NAME_SHORT,
9751         .id_table = pqi_pci_id_table,
9752         .probe = pqi_pci_probe,
9753         .remove = pqi_pci_remove,
9754         .shutdown = pqi_shutdown,
9755 #if defined(CONFIG_PM)
9756         .driver = {
9757                 .pm = &pqi_pm_ops
9758         },
9759 #endif
9760 };
9761
9762 static int __init pqi_init(void)
9763 {
9764         int rc;
9765
9766         pr_info(DRIVER_NAME "\n");
9767         pqi_verify_structures();
9768         sis_verify_structures();
9769
9770         pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
9771         if (!pqi_sas_transport_template)
9772                 return -ENODEV;
9773
9774         pqi_process_module_params();
9775
9776         rc = pci_register_driver(&pqi_pci_driver);
9777         if (rc)
9778                 sas_release_transport(pqi_sas_transport_template);
9779
9780         return rc;
9781 }
9782
9783 static void __exit pqi_cleanup(void)
9784 {
9785         pci_unregister_driver(&pqi_pci_driver);
9786         sas_release_transport(pqi_sas_transport_template);
9787 }
9788
9789 module_init(pqi_init);
9790 module_exit(pqi_cleanup);
9791
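/*
 * Compile-time checks that the driver's structure layouts match the
 * register and IU offsets expected by the controller interface.
 */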
9792 static void pqi_verify_structures(void)
9793 {
9794         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9795                 sis_host_to_ctrl_doorbell) != 0x20);
9796         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9797                 sis_interrupt_mask) != 0x34);
9798         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9799                 sis_ctrl_to_host_doorbell) != 0x9c);
9800         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9801                 sis_ctrl_to_host_doorbell_clear) != 0xa0);
9802         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9803                 sis_driver_scratch) != 0xb0);
9804         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9805                 sis_product_identifier) != 0xb4);
9806         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9807                 sis_firmware_status) != 0xbc);
9808         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9809                 sis_ctrl_shutdown_reason_code) != 0xcc);
9810         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9811                 sis_mailbox) != 0x1000);
9812         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9813                 pqi_registers) != 0x4000);
9814
9815         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9816                 iu_type) != 0x0);
9817         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9818                 iu_length) != 0x2);
9819         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9820                 response_queue_id) != 0x4);
9821         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9822                 driver_flags) != 0x6);
9823         BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
9824
9825         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9826                 status) != 0x0);
9827         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9828                 service_response) != 0x1);
9829         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9830                 data_present) != 0x2);
9831         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9832                 reserved) != 0x3);
9833         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9834                 residual_count) != 0x4);
9835         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9836                 data_length) != 0x8);
9837         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9838                 reserved1) != 0xa);
9839         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9840                 data) != 0xc);
9841         BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
9842
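	/* RAID path error information */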
9843         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9844                 data_in_result) != 0x0);
9845         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9846                 data_out_result) != 0x1);
9847         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9848                 reserved) != 0x2);
9849         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9850                 status) != 0x5);
9851         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9852                 status_qualifier) != 0x6);
9853         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9854                 sense_data_length) != 0x8);
9855         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9856                 response_data_length) != 0xa);
9857         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9858                 data_in_transferred) != 0xc);
9859         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9860                 data_out_transferred) != 0x10);
9861         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9862                 data) != 0x14);
9863         BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
9864
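	/* PQI device registers (admin queue setup and status) */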
9865         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9866                 signature) != 0x0);
9867         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9868                 function_and_status_code) != 0x8);
9869         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9870                 max_admin_iq_elements) != 0x10);
9871         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9872                 max_admin_oq_elements) != 0x11);
9873         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9874                 admin_iq_element_length) != 0x12);
9875         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9876                 admin_oq_element_length) != 0x13);
9877         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9878                 max_reset_timeout) != 0x14);
9879         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9880                 legacy_intx_status) != 0x18);
9881         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9882                 legacy_intx_mask_set) != 0x1c);
9883         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9884                 legacy_intx_mask_clear) != 0x20);
9885         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9886                 device_status) != 0x40);
9887         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9888                 admin_iq_pi_offset) != 0x48);
9889         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9890                 admin_oq_ci_offset) != 0x50);
9891         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9892                 admin_iq_element_array_addr) != 0x58);
9893         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9894                 admin_oq_element_array_addr) != 0x60);
9895         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9896                 admin_iq_ci_addr) != 0x68);
9897         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9898                 admin_oq_pi_addr) != 0x70);
9899         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9900                 admin_iq_num_elements) != 0x78);
9901         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9902                 admin_oq_num_elements) != 0x79);
9903         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9904                 admin_queue_int_msg_num) != 0x7a);
9905         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9906                 device_error) != 0x80);
9907         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9908                 error_details) != 0x88);
9909         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9910                 device_reset) != 0x90);
9911         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9912                 power_action) != 0x94);
9913         BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
9914
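	/* General admin request IU */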
9915         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9916                 header.iu_type) != 0);
9917         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9918                 header.iu_length) != 2);
9919         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9920                 header.driver_flags) != 6);
9921         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9922                 request_id) != 8);
9923         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9924                 function_code) != 10);
9925         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9926                 data.report_device_capability.buffer_length) != 44);
9927         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9928                 data.report_device_capability.sg_descriptor) != 48);
9929         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9930                 data.create_operational_iq.queue_id) != 12);
9931         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9932                 data.create_operational_iq.element_array_addr) != 16);
9933         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9934                 data.create_operational_iq.ci_addr) != 24);
9935         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9936                 data.create_operational_iq.num_elements) != 32);
9937         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9938                 data.create_operational_iq.element_length) != 34);
9939         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9940                 data.create_operational_iq.queue_protocol) != 36);
9941         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9942                 data.create_operational_oq.queue_id) != 12);
9943         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9944                 data.create_operational_oq.element_array_addr) != 16);
9945         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9946                 data.create_operational_oq.pi_addr) != 24);
9947         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9948                 data.create_operational_oq.num_elements) != 32);
9949         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9950                 data.create_operational_oq.element_length) != 34);
9951         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9952                 data.create_operational_oq.queue_protocol) != 36);
9953         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9954                 data.create_operational_oq.int_msg_num) != 40);
9955         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9956                 data.create_operational_oq.coalescing_count) != 42);
9957         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9958                 data.create_operational_oq.min_coalescing_time) != 44);
9959         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9960                 data.create_operational_oq.max_coalescing_time) != 48);
9961         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9962                 data.delete_operational_queue.queue_id) != 12);
9963         BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
9964         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9965                 data.create_operational_iq) != 64 - 11);
9966         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9967                 data.create_operational_oq) != 64 - 11);
9968         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9969                 data.delete_operational_queue) != 64 - 11);
9970
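	/* General admin response IU */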
9971         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9972                 header.iu_type) != 0);
9973         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9974                 header.iu_length) != 2);
9975         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9976                 header.driver_flags) != 6);
9977         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9978                 request_id) != 8);
9979         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9980                 function_code) != 10);
9981         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9982                 status) != 11);
9983         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9984                 data.create_operational_iq.status_descriptor) != 12);
9985         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9986                 data.create_operational_iq.iq_pi_offset) != 16);
9987         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9988                 data.create_operational_oq.status_descriptor) != 12);
9989         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9990                 data.create_operational_oq.oq_ci_offset) != 16);
9991         BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
9992
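	/* RAID path request IU */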
9993         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9994                 header.iu_type) != 0);
9995         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9996                 header.iu_length) != 2);
9997         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9998                 header.response_queue_id) != 4);
9999         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10000                 header.driver_flags) != 6);
10001         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10002                 request_id) != 8);
10003         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10004                 nexus_id) != 10);
10005         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10006                 buffer_length) != 12);
10007         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10008                 lun_number) != 16);
10009         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10010                 protocol_specific) != 24);
10011         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10012                 error_index) != 27);
10013         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10014                 cdb) != 32);
10015         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10016                 timeout) != 60);
10017         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10018                 sg_descriptors) != 64);
10019         BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10020                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10021
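	/* AIO path request IU */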
10022         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10023                 header.iu_type) != 0);
10024         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10025                 header.iu_length) != 2);
10026         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10027                 header.response_queue_id) != 4);
10028         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10029                 header.driver_flags) != 6);
10030         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10031                 request_id) != 8);
10032         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10033                 nexus_id) != 12);
10034         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10035                 buffer_length) != 16);
10036         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10037                 data_encryption_key_index) != 22);
10038         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10039                 encrypt_tweak_lower) != 24);
10040         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10041                 encrypt_tweak_upper) != 28);
10042         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10043                 cdb) != 32);
10044         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10045                 error_index) != 48);
10046         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10047                 num_sg_descriptors) != 50);
10048         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10049                 cdb_length) != 51);
10050         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10051                 lun_number) != 52);
10052         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10053                 sg_descriptors) != 64);
10054         BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10055                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10056
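	/* I/O response IU */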
10057         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10058                 header.iu_type) != 0);
10059         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10060                 header.iu_length) != 2);
10061         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10062                 request_id) != 8);
10063         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10064                 error_index) != 10);
10065
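	/* General management request IU (event configuration) */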
10066         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10067                 header.iu_type) != 0);
10068         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10069                 header.iu_length) != 2);
10070         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10071                 header.response_queue_id) != 4);
10072         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10073                 request_id) != 8);
10074         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10075                 data.report_event_configuration.buffer_length) != 12);
10076         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10077                 data.report_event_configuration.sg_descriptors) != 16);
10078         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10079                 data.set_event_configuration.global_event_oq_id) != 10);
10080         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10081                 data.set_event_configuration.buffer_length) != 12);
10082         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10083                 data.set_event_configuration.sg_descriptors) != 16);
10084
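	/* IU layer descriptor */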
10085         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10086                 max_inbound_iu_length) != 6);
10087         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10088                 max_outbound_iu_length) != 14);
10089         BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10090
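	/* Device capability buffer */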
10091         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10092                 data_length) != 0);
10093         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10094                 iq_arbitration_priority_support_bitmask) != 8);
10095         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10096                 maximum_aw_a) != 9);
10097         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10098                 maximum_aw_b) != 10);
10099         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10100                 maximum_aw_c) != 11);
10101         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10102                 max_inbound_queues) != 16);
10103         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10104                 max_elements_per_iq) != 18);
10105         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10106                 max_iq_element_length) != 24);
10107         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10108                 min_iq_element_length) != 26);
10109         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10110                 max_outbound_queues) != 30);
10111         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10112                 max_elements_per_oq) != 32);
10113         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10114                 intr_coalescing_time_granularity) != 34);
10115         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10116                 max_oq_element_length) != 36);
10117         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10118                 min_oq_element_length) != 38);
10119         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10120                 iu_layer_descriptors) != 64);
10121         BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10122
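	/* Event descriptor and event configuration */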
10123         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10124                 event_type) != 0);
10125         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10126                 oq_id) != 2);
10127         BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10128
10129         BUILD_BUG_ON(offsetof(struct pqi_event_config,
10130                 num_event_descriptors) != 2);
10131         BUILD_BUG_ON(offsetof(struct pqi_event_config,
10132                 descriptors) != 4);
10133
10134         BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10135                 ARRAY_SIZE(pqi_supported_event_types));
10136
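	/* Event response IU */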
10137         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10138                 header.iu_type) != 0);
10139         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10140                 header.iu_length) != 2);
10141         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10142                 event_type) != 8);
10143         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10144                 event_id) != 10);
10145         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10146                 additional_event_id) != 12);
10147         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10148                 data) != 16);
10149         BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10150
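	/* Event acknowledge request IU */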
10151         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10152                 header.iu_type) != 0);
10153         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10154                 header.iu_length) != 2);
10155         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10156                 event_type) != 8);
10157         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10158                 event_id) != 10);
10159         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10160                 additional_event_id) != 12);
10161         BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10162
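	/* Task management request IU */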
10163         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10164                 header.iu_type) != 0);
10165         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10166                 header.iu_length) != 2);
10167         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10168                 request_id) != 8);
10169         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10170                 nexus_id) != 10);
10171         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10172                 timeout) != 14);
10173         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10174                 lun_number) != 16);
10175         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10176                 protocol_specific) != 24);
10177         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10178                 outbound_queue_id_to_manage) != 26);
10179         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10180                 request_id_to_manage) != 28);
10181         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10182                 task_management_function) != 30);
10183         BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10184
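	/* Task management response IU */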
10185         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10186                 header.iu_type) != 0);
10187         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10188                 header.iu_length) != 2);
10189         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10190                 request_id) != 8);
10191         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10192                 nexus_id) != 10);
10193         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10194                 additional_response_info) != 12);
10195         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10196                 response_code) != 15);
10197         BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10198
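	/* BMIC identify controller buffer */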
10199         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10200                 configured_logical_drive_count) != 0);
10201         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10202                 configuration_signature) != 1);
10203         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10204                 firmware_version_short) != 5);
10205         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10206                 extended_logical_unit_count) != 154);
10207         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10208                 firmware_build_number) != 190);
10209         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10210                 vendor_id) != 200);
10211         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10212                 product_id) != 208);
10213         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10214                 extra_controller_flags) != 286);
10215         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10216                 controller_mode) != 292);
10217         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10218                 spare_part_number) != 293);
10219         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10220                 firmware_version_long) != 325);
10221
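	/* BMIC identify physical device buffer */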
10222         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10223                 phys_bay_in_box) != 115);
10224         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10225                 device_type) != 120);
10226         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10227                 redundant_path_present_map) != 1736);
10228         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10229                 active_path_number) != 1738);
10230         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10231                 alternate_paths_phys_connector) != 1739);
10232         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10233                 alternate_paths_phys_box_on_port) != 1755);
10234         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10235                 current_queue_depth_limit) != 1796);
10236         BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
10237
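	/* BMIC sense feature headers and I/O page AIO subpage */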
10238         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
10239         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10240                 page_code) != 0);
10241         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10242                 subpage_code) != 1);
10243         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10244                 buffer_length) != 2);
10245
10246         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
10247         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10248                 page_code) != 0);
10249         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10250                 subpage_code) != 1);
10251         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10252                 page_length) != 2);
10253
10254         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
10255                 != 18);
10256         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10257                 header) != 0);
10258         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10259                 firmware_read_support) != 4);
10260         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10261                 driver_read_support) != 5);
10262         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10263                 firmware_write_support) != 6);
10264         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10265                 driver_write_support) != 7);
10266         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10267                 max_transfer_encrypted_sas_sata) != 8);
10268         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10269                 max_transfer_encrypted_nvme) != 10);
10270         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10271                 max_write_raid_5_6) != 12);
10272         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10273                 max_write_raid_1_10_2drive) != 14);
10274         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10275                 max_write_raid_1_10_3drive) != 16);
10276
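	/* Queue element counts, lengths, and alignment sanity checks */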
10277         BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
10278         BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10279         BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10280                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10281         BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10282                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10283         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10284         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10285                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10286         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10287         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10288                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10289
10290         BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
10291         BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
10292                 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
10293 }