GNU Linux-libre 4.9.294-gnu1
drivers/scsi/smartpqi/smartpqi_init.c
1 /*
2  *    driver for Microsemi PQI-based storage controllers
3  *    Copyright (c) 2016 Microsemi Corporation
4  *    Copyright (c) 2016 PMC-Sierra, Inc.
5  *
6  *    This program is free software; you can redistribute it and/or modify
7  *    it under the terms of the GNU General Public License as published by
8  *    the Free Software Foundation; version 2 of the License.
9  *
10  *    This program is distributed in the hope that it will be useful,
11  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
14  *
15  *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16  *
17  */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/pci.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/rtc.h>
26 #include <linux/bcd.h>
27 #include <linux/cciss_ioctl.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_eh.h>
32 #include <scsi/scsi_transport_sas.h>
33 #include <asm/unaligned.h>
34 #include "smartpqi.h"
35 #include "smartpqi_sis.h"
36
37 #if !defined(BUILD_TIMESTAMP)
38 #define BUILD_TIMESTAMP
39 #endif
40
41 #define DRIVER_VERSION          "0.9.13-370"
42 #define DRIVER_MAJOR            0
43 #define DRIVER_MINOR            9
44 #define DRIVER_RELEASE          13
45 #define DRIVER_REVISION         370
46
47 #define DRIVER_NAME             "Microsemi PQI Driver (v" DRIVER_VERSION ")"
48 #define DRIVER_NAME_SHORT       "smartpqi"
49
50 MODULE_AUTHOR("Microsemi");
51 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
52         DRIVER_VERSION);
53 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
54 MODULE_VERSION(DRIVER_VERSION);
55 MODULE_LICENSE("GPL");
56
57 #define PQI_ENABLE_MULTI_QUEUE_SUPPORT  0
58
59 static char *hpe_branded_controller = "HPE Smart Array Controller";
60 static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
61
62 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
63 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
64 static void pqi_scan_start(struct Scsi_Host *shost);
65 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
66         struct pqi_queue_group *queue_group, enum pqi_io_path path,
67         struct pqi_io_request *io_request);
68 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
69         struct pqi_iu_header *request, unsigned int flags,
70         struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
71 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
72         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
73         unsigned int cdb_length, struct pqi_queue_group *queue_group,
74         struct pqi_encryption_info *encryption_info);
75
76 /* for flags argument to pqi_submit_raid_request_synchronous() */
77 #define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1
78
79 static struct scsi_transport_template *pqi_sas_transport_template;
80
81 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
82
83 static int pqi_disable_device_id_wildcards;
84 module_param_named(disable_device_id_wildcards,
85         pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(disable_device_id_wildcards,
87         "Disable device ID wildcards.");
88
89 static char *raid_levels[] = {
90         "RAID-0",
91         "RAID-4",
92         "RAID-1(1+0)",
93         "RAID-5",
94         "RAID-5+1",
95         "RAID-ADG",
96         "RAID-1(ADM)",
97 };
98
99 static char *pqi_raid_level_to_string(u8 raid_level)
100 {
101         if (raid_level < ARRAY_SIZE(raid_levels))
102                 return raid_levels[raid_level];
103
104         return "";
105 }
106
107 #define SA_RAID_0               0
108 #define SA_RAID_4               1
109 #define SA_RAID_1               2       /* also used for RAID 10 */
110 #define SA_RAID_5               3       /* also used for RAID 50 */
111 #define SA_RAID_51              4
112 #define SA_RAID_6               5       /* also used for RAID 60 */
113 #define SA_RAID_ADM             6       /* also used for RAID 1+0 ADM */
114 #define SA_RAID_MAX             SA_RAID_ADM
115 #define SA_RAID_UNKNOWN         0xff
116
117 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
118 {
119         scmd->scsi_done(scmd);
120 }
121
122 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
123 {
124         return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
125 }
126
127 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
128 {
129         void *hostdata = shost_priv(shost);
130
131         return *((struct pqi_ctrl_info **)hostdata);
132 }
133
134 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
135 {
136         return !device->is_physical_device;
137 }
138
139 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
140 {
141         return !ctrl_info->controller_online;
142 }
143
144 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
145 {
146         if (ctrl_info->controller_online)
147                 if (!sis_is_firmware_running(ctrl_info))
148                         pqi_take_ctrl_offline(ctrl_info);
149 }
150
151 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
152 {
153         return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
154 }
155
156 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
157         struct pqi_ctrl_info *ctrl_info)
158 {
159         return sis_read_driver_scratch(ctrl_info);
160 }
161
162 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
163         enum pqi_ctrl_mode mode)
164 {
165         sis_write_driver_scratch(ctrl_info, mode);
166 }
167
168 #define PQI_RESCAN_WORK_INTERVAL        (10 * HZ)
169
170 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
171 {
172         schedule_delayed_work(&ctrl_info->rescan_work,
173                 PQI_RESCAN_WORK_INTERVAL);
174 }
175
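/*
 * Map a single kernel buffer for DMA and describe it with one CISS SG
 * descriptor.  A NULL buffer, a zero length, or PCI_DMA_NONE is treated
 * as "nothing to map" and succeeds without touching the descriptor.
 * The descriptor is flagged CISS_SG_LAST since it is the only element.
 */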
176 static int pqi_map_single(struct pci_dev *pci_dev,
177         struct pqi_sg_descriptor *sg_descriptor, void *buffer,
178         size_t buffer_length, int data_direction)
179 {
180         dma_addr_t bus_address;
181
182         if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
183                 return 0;
184
185         bus_address = pci_map_single(pci_dev, buffer, buffer_length,
186                 data_direction);
187         if (pci_dma_mapping_error(pci_dev, bus_address))
188                 return -ENOMEM;
189
190         put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
191         put_unaligned_le32(buffer_length, &sg_descriptor->length);
192         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
193
194         return 0;
195 }
196
197 static void pqi_pci_unmap(struct pci_dev *pci_dev,
198         struct pqi_sg_descriptor *descriptors, int num_descriptors,
199         int data_direction)
200 {
201         int i;
202
203         if (data_direction == PCI_DMA_NONE)
204                 return;
205
206         for (i = 0; i < num_descriptors; i++)
207                 pci_unmap_single(pci_dev,
208                         (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
209                         get_unaligned_le32(&descriptors[i].length),
210                         data_direction);
211 }
212
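/*
 * Build a RAID path request for one of the driver-internal CISS/BMIC
 * commands (INQUIRY, REPORT LUNS, GET RAID MAP, cache flush, identify,
 * host wellness).  The CDB and SOP data direction are filled in per
 * command, the SOP direction is translated into a PCI DMA direction for
 * the caller, and the data buffer is mapped into the request's first
 * SG descriptor.
 */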
213 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
214         struct pqi_raid_path_request *request, u8 cmd,
215         u8 *scsi3addr, void *buffer, size_t buffer_length,
216         u16 vpd_page, int *pci_direction)
217 {
218         u8 *cdb;
219         int pci_dir;
220
221         memset(request, 0, sizeof(*request));
222
223         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
224         put_unaligned_le16(offsetof(struct pqi_raid_path_request,
225                 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
226                 &request->header.iu_length);
227         put_unaligned_le32(buffer_length, &request->buffer_length);
228         memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
229         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
230         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
231
232         cdb = request->cdb;
233
234         switch (cmd) {
235         case INQUIRY:
236                 request->data_direction = SOP_READ_FLAG;
237                 cdb[0] = INQUIRY;
238                 if (vpd_page & VPD_PAGE) {
239                         cdb[1] = 0x1;
240                         cdb[2] = (u8)vpd_page;
241                 }
242                 cdb[4] = (u8)buffer_length;
243                 break;
244         case CISS_REPORT_LOG:
245         case CISS_REPORT_PHYS:
246                 request->data_direction = SOP_READ_FLAG;
247                 cdb[0] = cmd;
248                 if (cmd == CISS_REPORT_PHYS)
249                         cdb[1] = CISS_REPORT_PHYS_EXTENDED;
250                 else
251                         cdb[1] = CISS_REPORT_LOG_EXTENDED;
252                 put_unaligned_be32(buffer_length, &cdb[6]);
253                 break;
254         case CISS_GET_RAID_MAP:
255                 request->data_direction = SOP_READ_FLAG;
256                 cdb[0] = CISS_READ;
257                 cdb[1] = CISS_GET_RAID_MAP;
258                 put_unaligned_be32(buffer_length, &cdb[6]);
259                 break;
260         case SA_CACHE_FLUSH:
261                 request->data_direction = SOP_WRITE_FLAG;
262                 cdb[0] = BMIC_WRITE;
263                 cdb[6] = BMIC_CACHE_FLUSH;
264                 put_unaligned_be16(buffer_length, &cdb[7]);
265                 break;
266         case BMIC_IDENTIFY_CONTROLLER:
267         case BMIC_IDENTIFY_PHYSICAL_DEVICE:
268                 request->data_direction = SOP_READ_FLAG;
269                 cdb[0] = BMIC_READ;
270                 cdb[6] = cmd;
271                 put_unaligned_be16(buffer_length, &cdb[7]);
272                 break;
273         case BMIC_WRITE_HOST_WELLNESS:
274                 request->data_direction = SOP_WRITE_FLAG;
275                 cdb[0] = BMIC_WRITE;
276                 cdb[6] = cmd;
277                 put_unaligned_be16(buffer_length, &cdb[7]);
278                 break;
279         default:
280                 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
281                         cmd);
282                 WARN_ON(cmd);
283                 break;
284         }
285
286         switch (request->data_direction) {
287         case SOP_READ_FLAG:
288                 pci_dir = PCI_DMA_FROMDEVICE;
289                 break;
290         case SOP_WRITE_FLAG:
291                 pci_dir = PCI_DMA_TODEVICE;
292                 break;
293         case SOP_NO_DIRECTION_FLAG:
294                 pci_dir = PCI_DMA_NONE;
295                 break;
296         default:
297                 pci_dir = PCI_DMA_BIDIRECTIONAL;
298                 break;
299         }
300
301         *pci_direction = pci_dir;
302
303         return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
304                 buffer, buffer_length, pci_dir);
305 }
306
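/*
 * Allocate an I/O request slot without taking a lock: walk the pool
 * starting from the racy next_io_request_slot hint and claim the first
 * slot whose refcount transitions from 0 to 1.  Losing a race on the
 * hint only costs a few extra iterations, hence "benignly racy".
 */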
307 static struct pqi_io_request *pqi_alloc_io_request(
308         struct pqi_ctrl_info *ctrl_info)
309 {
310         struct pqi_io_request *io_request;
311         u16 i = ctrl_info->next_io_request_slot;        /* benignly racy */
312
313         while (1) {
314                 io_request = &ctrl_info->io_request_pool[i];
315                 if (atomic_inc_return(&io_request->refcount) == 1)
316                         break;
317                 atomic_dec(&io_request->refcount);
318                 i = (i + 1) % ctrl_info->max_io_slots;
319         }
320
321         /* benignly racy */
322         ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
323
324         io_request->scmd = NULL;
325         io_request->status = 0;
326         io_request->error_info = NULL;
327
328         return io_request;
329 }
330
331 static void pqi_free_io_request(struct pqi_io_request *io_request)
332 {
333         atomic_dec(&io_request->refcount);
334 }
335
336 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
337         struct bmic_identify_controller *buffer)
338 {
339         int rc;
340         int pci_direction;
341         struct pqi_raid_path_request request;
342
343         rc = pqi_build_raid_path_request(ctrl_info, &request,
344                 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
345                 sizeof(*buffer), 0, &pci_direction);
346         if (rc)
347                 return rc;
348
349         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
350                 NULL, NO_TIMEOUT);
351
352         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
353                 pci_direction);
354
355         return rc;
356 }
357
358 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
359         u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
360 {
361         int rc;
362         int pci_direction;
363         struct pqi_raid_path_request request;
364
365         rc = pqi_build_raid_path_request(ctrl_info, &request,
366                 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
367                 &pci_direction);
368         if (rc)
369                 return rc;
370
371         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
372                 NULL, NO_TIMEOUT);
373
374         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
375                 pci_direction);
376
377         return rc;
378 }
379
380 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
381         struct pqi_scsi_dev *device,
382         struct bmic_identify_physical_device *buffer,
383         size_t buffer_length)
384 {
385         int rc;
386         int pci_direction;
387         u16 bmic_device_index;
388         struct pqi_raid_path_request request;
389
390         rc = pqi_build_raid_path_request(ctrl_info, &request,
391                 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
392                 buffer_length, 0, &pci_direction);
393         if (rc)
394                 return rc;
395
396         bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
397         request.cdb[2] = (u8)bmic_device_index;
398         request.cdb[9] = (u8)(bmic_device_index >> 8);
399
400         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
401                 0, NULL, NO_TIMEOUT);
402
403         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
404                 pci_direction);
405
406         return rc;
407 }
408
409 #define SA_CACHE_FLUSH_BUFFER_LENGTH    4
410
411 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
412 {
413         int rc;
414         struct pqi_raid_path_request request;
415         int pci_direction;
416         u8 *buffer;
417
418         /*
419          * Don't bother trying to flush the cache if the controller is
420          * locked up.
421          */
422         if (pqi_ctrl_offline(ctrl_info))
423                 return -ENXIO;
424
425         buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
426         if (!buffer)
427                 return -ENOMEM;
428
429         rc = pqi_build_raid_path_request(ctrl_info, &request,
430                 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
431                 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
432         if (rc)
433                 goto out;
434
435         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
436                 0, NULL, NO_TIMEOUT);
437
438         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
439                 pci_direction);
440
441 out:
442         kfree(buffer);
443
444         return rc;
445 }
446
447 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
448         void *buffer, size_t buffer_length)
449 {
450         int rc;
451         struct pqi_raid_path_request request;
452         int pci_direction;
453
454         rc = pqi_build_raid_path_request(ctrl_info, &request,
455                 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
456                 buffer_length, 0, &pci_direction);
457         if (rc)
458                 return rc;
459
460         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
461                 0, NULL, NO_TIMEOUT);
462
463         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
464                 pci_direction);
465
466         return rc;
467 }
468
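/*
 * On-the-wire layout for the driver version portion of the host wellness
 * buffer sent via BMIC_WRITE_HOST_WELLNESS.  The structure is packed and
 * the tag fields carry fixed ASCII markers ('<HW>', 'DV', 'DW', 'ZZ').
 */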
469 #pragma pack(1)
470
471 struct bmic_host_wellness_driver_version {
472         u8      start_tag[4];
473         u8      driver_version_tag[2];
474         __le16  driver_version_length;
475         char    driver_version[32];
476         u8      dont_write_tag[2];
477         u8      end_tag[2];
478 };
479
480 #pragma pack()
481
482 static int pqi_write_driver_version_to_host_wellness(
483         struct pqi_ctrl_info *ctrl_info)
484 {
485         int rc;
486         struct bmic_host_wellness_driver_version *buffer;
487         size_t buffer_length;
488
489         buffer_length = sizeof(*buffer);
490
491         buffer = kmalloc(buffer_length, GFP_KERNEL);
492         if (!buffer)
493                 return -ENOMEM;
494
495         buffer->start_tag[0] = '<';
496         buffer->start_tag[1] = 'H';
497         buffer->start_tag[2] = 'W';
498         buffer->start_tag[3] = '>';
499         buffer->driver_version_tag[0] = 'D';
500         buffer->driver_version_tag[1] = 'V';
501         put_unaligned_le16(sizeof(buffer->driver_version),
502                 &buffer->driver_version_length);
503         strncpy(buffer->driver_version, DRIVER_VERSION,
504                 sizeof(buffer->driver_version) - 1);
505         buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
506         buffer->dont_write_tag[0] = 'D';
507         buffer->dont_write_tag[1] = 'W';
508         buffer->end_tag[0] = 'Z';
509         buffer->end_tag[1] = 'Z';
510
511         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
512
513         kfree(buffer);
514
515         return rc;
516 }
517
518 #pragma pack(1)
519
520 struct bmic_host_wellness_time {
521         u8      start_tag[4];
522         u8      time_tag[2];
523         __le16  time_length;
524         u8      time[8];
525         u8      dont_write_tag[2];
526         u8      end_tag[2];
527 };
528
529 #pragma pack()
530
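/*
 * Report the current host time to the controller through the host
 * wellness command.  The eight time bytes are BCD encoded as hour,
 * minute, second, a reserved byte, month, day of month, century, and
 * two-digit year, adjusted for the local time zone.
 */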
531 static int pqi_write_current_time_to_host_wellness(
532         struct pqi_ctrl_info *ctrl_info)
533 {
534         int rc;
535         struct bmic_host_wellness_time *buffer;
536         size_t buffer_length;
537         time64_t local_time;
538         unsigned int year;
539         struct tm tm;
540
541         buffer_length = sizeof(*buffer);
542
543         buffer = kmalloc(buffer_length, GFP_KERNEL);
544         if (!buffer)
545                 return -ENOMEM;
546
547         buffer->start_tag[0] = '<';
548         buffer->start_tag[1] = 'H';
549         buffer->start_tag[2] = 'W';
550         buffer->start_tag[3] = '>';
551         buffer->time_tag[0] = 'T';
552         buffer->time_tag[1] = 'D';
553         put_unaligned_le16(sizeof(buffer->time),
554                 &buffer->time_length);
555
556         local_time = ktime_get_real_seconds();
557         time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
558         year = tm.tm_year + 1900;
559
560         buffer->time[0] = bin2bcd(tm.tm_hour);
561         buffer->time[1] = bin2bcd(tm.tm_min);
562         buffer->time[2] = bin2bcd(tm.tm_sec);
563         buffer->time[3] = 0;
564         buffer->time[4] = bin2bcd(tm.tm_mon + 1);
565         buffer->time[5] = bin2bcd(tm.tm_mday);
566         buffer->time[6] = bin2bcd(year / 100);
567         buffer->time[7] = bin2bcd(year % 100);
568
569         buffer->dont_write_tag[0] = 'D';
570         buffer->dont_write_tag[1] = 'W';
571         buffer->end_tag[0] = 'Z';
572         buffer->end_tag[1] = 'Z';
573
574         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
575
576         kfree(buffer);
577
578         return rc;
579 }
580
581 #define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * HZ)
582
583 static void pqi_update_time_worker(struct work_struct *work)
584 {
585         int rc;
586         struct pqi_ctrl_info *ctrl_info;
587
588         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
589                 update_time_work);
590
591         rc = pqi_write_current_time_to_host_wellness(ctrl_info);
592         if (rc)
593                 dev_warn(&ctrl_info->pci_dev->dev,
594                         "error updating time on controller\n");
595
596         schedule_delayed_work(&ctrl_info->update_time_work,
597                 PQI_UPDATE_TIME_WORK_INTERVAL);
598 }
599
600 static inline void pqi_schedule_update_time_worker(
601         struct pqi_ctrl_info *ctrl_info)
602 {
603         schedule_delayed_work(&ctrl_info->update_time_work, 0);
604 }
605
606 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
607         void *buffer, size_t buffer_length)
608 {
609         int rc;
610         int pci_direction;
611         struct pqi_raid_path_request request;
612
613         rc = pqi_build_raid_path_request(ctrl_info, &request,
614                 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
615         if (rc)
616                 return rc;
617
618         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
619                 NULL, NO_TIMEOUT);
620
621         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
622                 pci_direction);
623
624         return rc;
625 }
626
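/*
 * Issue CISS_REPORT_PHYS or CISS_REPORT_LOG in two passes: first fetch
 * just the header to learn the list length, then allocate a buffer
 * large enough for the full list and fetch it again.  If the list grew
 * between the two passes, retry with the larger length.
 */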
627 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
628         void **buffer)
629 {
630         int rc;
631         size_t lun_list_length;
632         size_t lun_data_length;
633         size_t new_lun_list_length;
634         void *lun_data = NULL;
635         struct report_lun_header *report_lun_header;
636
637         report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
638         if (!report_lun_header) {
639                 rc = -ENOMEM;
640                 goto out;
641         }
642
643         rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
644                 sizeof(*report_lun_header));
645         if (rc)
646                 goto out;
647
648         lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
649
650 again:
651         lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
652
653         lun_data = kmalloc(lun_data_length, GFP_KERNEL);
654         if (!lun_data) {
655                 rc = -ENOMEM;
656                 goto out;
657         }
658
659         if (lun_list_length == 0) {
660                 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
661                 goto out;
662         }
663
664         rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
665         if (rc)
666                 goto out;
667
668         new_lun_list_length = get_unaligned_be32(
669                 &((struct report_lun_header *)lun_data)->list_length);
670
671         if (new_lun_list_length > lun_list_length) {
672                 lun_list_length = new_lun_list_length;
673                 kfree(lun_data);
674                 goto again;
675         }
676
677 out:
678         kfree(report_lun_header);
679
680         if (rc) {
681                 kfree(lun_data);
682                 lun_data = NULL;
683         }
684
685         *buffer = lun_data;
686
687         return rc;
688 }
689
690 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
691         void **buffer)
692 {
693         return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
694                 buffer);
695 }
696
697 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
698         void **buffer)
699 {
700         return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
701 }
702
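/*
 * Fetch both the physical and logical LUN lists, then append one
 * zeroed entry to the end of the logical list to represent the
 * controller LUN itself so that it is enumerated along with the
 * logical devices.
 */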
703 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
704         struct report_phys_lun_extended **physdev_list,
705         struct report_log_lun_extended **logdev_list)
706 {
707         int rc;
708         size_t logdev_list_length;
709         size_t logdev_data_length;
710         struct report_log_lun_extended *internal_logdev_list;
711         struct report_log_lun_extended *logdev_data;
712         struct report_lun_header report_lun_header;
713
714         rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
715         if (rc)
716                 dev_err(&ctrl_info->pci_dev->dev,
717                         "report physical LUNs failed\n");
718
719         rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
720         if (rc)
721                 dev_err(&ctrl_info->pci_dev->dev,
722                         "report logical LUNs failed\n");
723
724         /*
725          * Tack the controller itself onto the end of the logical device list.
726          */
727
728         logdev_data = *logdev_list;
729
730         if (logdev_data) {
731                 logdev_list_length =
732                         get_unaligned_be32(&logdev_data->header.list_length);
733         } else {
734                 memset(&report_lun_header, 0, sizeof(report_lun_header));
735                 logdev_data =
736                         (struct report_log_lun_extended *)&report_lun_header;
737                 logdev_list_length = 0;
738         }
739
740         logdev_data_length = sizeof(struct report_lun_header) +
741                 logdev_list_length;
742
743         internal_logdev_list = kmalloc(logdev_data_length +
744                 sizeof(struct report_log_lun_extended), GFP_KERNEL);
745         if (!internal_logdev_list) {
746                 kfree(*logdev_list);
747                 *logdev_list = NULL;
748                 return -ENOMEM;
749         }
750
751         memcpy(internal_logdev_list, logdev_data, logdev_data_length);
752         memset((u8 *)internal_logdev_list + logdev_data_length, 0,
753                 sizeof(struct report_log_lun_extended_entry));
754         put_unaligned_be32(logdev_list_length +
755                 sizeof(struct report_log_lun_extended_entry),
756                 &internal_logdev_list->header.list_length);
757
758         kfree(*logdev_list);
759         *logdev_list = internal_logdev_list;
760
761         return 0;
762 }
763
764 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
765         int bus, int target, int lun)
766 {
767         device->bus = bus;
768         device->target = target;
769         device->lun = lun;
770 }
771
772 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
773 {
774         u8 *scsi3addr;
775         u32 lunid;
776
777         scsi3addr = device->scsi3addr;
778         lunid = get_unaligned_le32(scsi3addr);
779
780         if (pqi_is_hba_lunid(scsi3addr)) {
781                 /* The specified device is the controller. */
782                 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
783                 device->target_lun_valid = true;
784                 return;
785         }
786
787         if (pqi_is_logical_device(device)) {
788                 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
789                         lunid & 0x3fff);
790                 device->target_lun_valid = true;
791                 return;
792         }
793
794         /*
795          * Defer target and LUN assignment for non-controller physical devices
796          * because the SAS transport layer will make these assignments later.
797          */
798         pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
799 }
800
801 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
802         struct pqi_scsi_dev *device)
803 {
804         int rc;
805         u8 raid_level;
806         u8 *buffer;
807
808         raid_level = SA_RAID_UNKNOWN;
809
810         buffer = kmalloc(64, GFP_KERNEL);
811         if (buffer) {
812                 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
813                         VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
814                 if (rc == 0) {
815                         raid_level = buffer[8];
816                         if (raid_level > SA_RAID_MAX)
817                                 raid_level = SA_RAID_UNKNOWN;
818                 }
819                 kfree(buffer);
820         }
821
822         device->raid_level = raid_level;
823 }
824
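/*
 * Sanity check a RAID map returned by the controller before it is used
 * for accelerated I/O: the reported size must fit the structure, the
 * entry count must not exceed RAID_MAP_MAX_ENTRIES, the layout map
 * count must match the RAID level (2 for RAID-1, 3 for RAID-1 ADM),
 * and spanned RAID-5/6 maps must have a non-zero row size.
 */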
825 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
826         struct pqi_scsi_dev *device, struct raid_map *raid_map)
827 {
828         char *err_msg;
829         u32 raid_map_size;
830         u32 r5or6_blocks_per_row;
831         unsigned int num_phys_disks;
832         unsigned int num_raid_map_entries;
833
834         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
835
836         if (raid_map_size < offsetof(struct raid_map, disk_data)) {
837                 err_msg = "RAID map too small";
838                 goto bad_raid_map;
839         }
840
841         if (raid_map_size > sizeof(*raid_map)) {
842                 err_msg = "RAID map too large";
843                 goto bad_raid_map;
844         }
845
846         num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
847                 (get_unaligned_le16(&raid_map->data_disks_per_row) +
848                 get_unaligned_le16(&raid_map->metadata_disks_per_row));
849         num_raid_map_entries = num_phys_disks *
850                 get_unaligned_le16(&raid_map->row_cnt);
851
852         if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
853                 err_msg = "invalid number of map entries in RAID map";
854                 goto bad_raid_map;
855         }
856
857         if (device->raid_level == SA_RAID_1) {
858                 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
859                         err_msg = "invalid RAID-1 map";
860                         goto bad_raid_map;
861                 }
862         } else if (device->raid_level == SA_RAID_ADM) {
863                 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
864                         err_msg = "invalid RAID-1(ADM) map";
865                         goto bad_raid_map;
866                 }
867         } else if ((device->raid_level == SA_RAID_5 ||
868                 device->raid_level == SA_RAID_6) &&
869                 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
870                 /* RAID 50/60 */
871                 r5or6_blocks_per_row =
872                         get_unaligned_le16(&raid_map->strip_size) *
873                         get_unaligned_le16(&raid_map->data_disks_per_row);
874                 if (r5or6_blocks_per_row == 0) {
875                         err_msg = "invalid RAID-5 or RAID-6 map";
876                         goto bad_raid_map;
877                 }
878         }
879
880         return 0;
881
882 bad_raid_map:
883         dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
884
885         return -EINVAL;
886 }
887
888 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
889         struct pqi_scsi_dev *device)
890 {
891         int rc;
892         int pci_direction;
893         struct pqi_raid_path_request request;
894         struct raid_map *raid_map;
895
896         raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
897         if (!raid_map)
898                 return -ENOMEM;
899
900         rc = pqi_build_raid_path_request(ctrl_info, &request,
901                 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
902                 sizeof(*raid_map), 0, &pci_direction);
903         if (rc)
904                 goto error;
905
906         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
907                 NULL, NO_TIMEOUT);
908
909         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
910                 pci_direction);
911
912         if (rc)
913                 goto error;
914
915         rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
916         if (rc)
917                 goto error;
918
919         device->raid_map = raid_map;
920
921         return 0;
922
923 error:
924         kfree(raid_map);
925
926         return rc;
927 }
928
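/*
 * Read the logical volume offload status VPD page to determine whether
 * I/O offload is configured and enabled for the volume.  If offload is
 * configured, the RAID map is fetched; failure to fetch a valid map
 * clears the pending enable flag.
 */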
929 static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
930         struct pqi_scsi_dev *device)
931 {
932         int rc;
933         u8 *buffer;
934         u8 offload_status;
935
936         buffer = kmalloc(64, GFP_KERNEL);
937         if (!buffer)
938                 return;
939
940         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
941                 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
942         if (rc)
943                 goto out;
944
945 #define OFFLOAD_STATUS_BYTE     4
946 #define OFFLOAD_CONFIGURED_BIT  0x1
947 #define OFFLOAD_ENABLED_BIT     0x2
948
949         offload_status = buffer[OFFLOAD_STATUS_BYTE];
950         device->offload_configured =
951                 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
952         if (device->offload_configured) {
953                 device->offload_enabled_pending =
954                         !!(offload_status & OFFLOAD_ENABLED_BIT);
955                 if (pqi_get_raid_map(ctrl_info, device))
956                         device->offload_enabled_pending = false;
957         }
958
959 out:
960         kfree(buffer);
961 }
962
963 /*
964  * Use vendor-specific VPD to determine online/offline status of a volume.
965  */
966
967 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
968         struct pqi_scsi_dev *device)
969 {
970         int rc;
971         size_t page_length;
972         u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
973         bool volume_offline = true;
974         u32 volume_flags;
975         struct ciss_vpd_logical_volume_status *vpd;
976
977         vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
978         if (!vpd)
979                 goto no_buffer;
980
981         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
982                 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
983         if (rc)
984                 goto out;
985
986         if (vpd->page_code != CISS_VPD_LV_STATUS)
987                 goto out;
988
989         page_length = offsetof(struct ciss_vpd_logical_volume_status,
990                 volume_status) + vpd->page_length;
991         if (page_length < sizeof(*vpd))
992                 goto out;
993
994         volume_status = vpd->volume_status;
995         volume_flags = get_unaligned_be32(&vpd->flags);
996         volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
997
998 out:
999         kfree(vpd);
1000 no_buffer:
1001         device->volume_status = volume_status;
1002         device->volume_offline = volume_offline;
1003 }
1004
1005 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1006         struct pqi_scsi_dev *device)
1007 {
1008         int rc;
1009         u8 *buffer;
1010
1011         buffer = kmalloc(64, GFP_KERNEL);
1012         if (!buffer)
1013                 return -ENOMEM;
1014
1015         /* Send an inquiry to the device to see what it is. */
1016         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1017         if (rc)
1018                 goto out;
1019
1020         scsi_sanitize_inquiry_string(&buffer[8], 8);
1021         scsi_sanitize_inquiry_string(&buffer[16], 16);
1022
1023         device->devtype = buffer[0] & 0x1f;
1024         memcpy(device->vendor, &buffer[8],
1025                 sizeof(device->vendor));
1026         memcpy(device->model, &buffer[16],
1027                 sizeof(device->model));
1028
1029         if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1030                 pqi_get_raid_level(ctrl_info, device);
1031                 pqi_get_offload_status(ctrl_info, device);
1032                 pqi_get_volume_status(ctrl_info, device);
1033         }
1034
1035 out:
1036         kfree(buffer);
1037
1038         return rc;
1039 }
1040
1041 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1042         struct pqi_scsi_dev *device,
1043         struct bmic_identify_physical_device *id_phys)
1044 {
1045         int rc;
1046
1047         memset(id_phys, 0, sizeof(*id_phys));
1048
1049         rc = pqi_identify_physical_device(ctrl_info, device,
1050                 id_phys, sizeof(*id_phys));
1051         if (rc) {
1052                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1053                 return;
1054         }
1055
1056         device->queue_depth =
1057                 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1058         device->device_type = id_phys->device_type;
1059         device->active_path_index = id_phys->active_path_number;
1060         device->path_map = id_phys->redundant_path_present_map;
1061         memcpy(&device->box,
1062                 &id_phys->alternate_paths_phys_box_on_port,
1063                 sizeof(device->box));
1064         memcpy(&device->phys_connector,
1065                 &id_phys->alternate_paths_phys_connector,
1066                 sizeof(device->phys_connector));
1067         device->bay = id_phys->phys_bay_in_box;
1068 }
1069
1070 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1071         struct pqi_scsi_dev *device)
1072 {
1073         char *status;
1074         static const char unknown_state_str[] =
1075                 "Volume is in an unknown state (%u)";
1076         char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1077
1078         switch (device->volume_status) {
1079         case CISS_LV_OK:
1080                 status = "Volume online";
1081                 break;
1082         case CISS_LV_FAILED:
1083                 status = "Volume failed";
1084                 break;
1085         case CISS_LV_NOT_CONFIGURED:
1086                 status = "Volume not configured";
1087                 break;
1088         case CISS_LV_DEGRADED:
1089                 status = "Volume degraded";
1090                 break;
1091         case CISS_LV_READY_FOR_RECOVERY:
1092                 status = "Volume ready for recovery operation";
1093                 break;
1094         case CISS_LV_UNDERGOING_RECOVERY:
1095                 status = "Volume undergoing recovery";
1096                 break;
1097         case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1098                 status = "Wrong physical drive was replaced";
1099                 break;
1100         case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1101                 status = "A physical drive is not properly connected";
1102                 break;
1103         case CISS_LV_HARDWARE_OVERHEATING:
1104                 status = "Hardware is overheating";
1105                 break;
1106         case CISS_LV_HARDWARE_HAS_OVERHEATED:
1107                 status = "Hardware has overheated";
1108                 break;
1109         case CISS_LV_UNDERGOING_EXPANSION:
1110                 status = "Volume undergoing expansion";
1111                 break;
1112         case CISS_LV_NOT_AVAILABLE:
1113                 status = "Volume waiting for transforming volume";
1114                 break;
1115         case CISS_LV_QUEUED_FOR_EXPANSION:
1116                 status = "Volume queued for expansion";
1117                 break;
1118         case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1119                 status = "Volume disabled due to SCSI ID conflict";
1120                 break;
1121         case CISS_LV_EJECTED:
1122                 status = "Volume has been ejected";
1123                 break;
1124         case CISS_LV_UNDERGOING_ERASE:
1125                 status = "Volume undergoing background erase";
1126                 break;
1127         case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1128                 status = "Volume ready for predictive spare rebuild";
1129                 break;
1130         case CISS_LV_UNDERGOING_RPI:
1131                 status = "Volume undergoing rapid parity initialization";
1132                 break;
1133         case CISS_LV_PENDING_RPI:
1134                 status = "Volume queued for rapid parity initialization";
1135                 break;
1136         case CISS_LV_ENCRYPTED_NO_KEY:
1137                 status = "Encrypted volume inaccessible - key not present";
1138                 break;
1139         case CISS_LV_UNDERGOING_ENCRYPTION:
1140                 status = "Volume undergoing encryption process";
1141                 break;
1142         case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1143                 status = "Volume undergoing encryption re-keying process";
1144                 break;
1145         case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1146                 status =
1147                         "Encrypted volume inaccessible - disabled on ctrl";
1148                 break;
1149         case CISS_LV_PENDING_ENCRYPTION:
1150                 status = "Volume pending migration to encrypted state";
1151                 break;
1152         case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1153                 status = "Volume pending encryption rekeying";
1154                 break;
1155         case CISS_LV_NOT_SUPPORTED:
1156                 status = "Volume not supported on this controller";
1157                 break;
1158         case CISS_LV_STATUS_UNAVAILABLE:
1159                 status = "Volume status not available";
1160                 break;
1161         default:
1162                 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1163                         unknown_state_str, device->volume_status);
1164                 status = unknown_state_buffer;
1165                 break;
1166         }
1167
1168         dev_info(&ctrl_info->pci_dev->dev,
1169                 "scsi %d:%d:%d:%d %s\n",
1170                 ctrl_info->scsi_host->host_no,
1171                 device->bus, device->target, device->lun, status);
1172 }
1173
1174 static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1175         struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1176 {
1177         struct pqi_scsi_dev *device;
1178
1179         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1180                 scsi_device_list_entry) {
1181                 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1182                         continue;
1183                 if (pqi_is_logical_device(device))
1184                         continue;
1185                 if (device->aio_handle == aio_handle)
1186                         return device;
1187         }
1188
1189         return NULL;
1190 }
1191
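/*
 * Derive a logical drive's queue depth from its RAID map by summing the
 * queue depths of all member physical disks.  If any member cannot be
 * located by its AIO handle, offload is disabled for the drive and its
 * RAID map is dropped.
 */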
1192 static void pqi_update_logical_drive_queue_depth(
1193         struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1194 {
1195         unsigned int i;
1196         struct raid_map *raid_map;
1197         struct raid_map_disk_data *disk_data;
1198         struct pqi_scsi_dev *phys_disk;
1199         unsigned int num_phys_disks;
1200         unsigned int num_raid_map_entries;
1201         unsigned int queue_depth;
1202
1203         logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1204
1205         raid_map = logical_drive->raid_map;
1206         if (!raid_map)
1207                 return;
1208
1209         disk_data = raid_map->disk_data;
1210         num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1211                 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1212                 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1213         num_raid_map_entries = num_phys_disks *
1214                 get_unaligned_le16(&raid_map->row_cnt);
1215
1216         queue_depth = 0;
1217         for (i = 0; i < num_raid_map_entries; i++) {
1218                 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1219                         disk_data[i].aio_handle);
1220
1221                 if (!phys_disk) {
1222                         dev_warn(&ctrl_info->pci_dev->dev,
1223                                 "failed to find physical disk for logical drive %016llx\n",
1224                                 get_unaligned_be64(logical_drive->scsi3addr));
1225                         logical_drive->offload_enabled = false;
1226                         logical_drive->offload_enabled_pending = false;
1227                         kfree(raid_map);
1228                         logical_drive->raid_map = NULL;
1229                         return;
1230                 }
1231
1232                 queue_depth += phys_disk->queue_depth;
1233         }
1234
1235         logical_drive->queue_depth = queue_depth;
1236 }
1237
1238 static void pqi_update_all_logical_drive_queue_depths(
1239         struct pqi_ctrl_info *ctrl_info)
1240 {
1241         struct pqi_scsi_dev *device;
1242
1243         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1244                 scsi_device_list_entry) {
1245                 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1246                         continue;
1247                 if (!pqi_is_logical_device(device))
1248                         continue;
1249                 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1250         }
1251 }
1252
1253 static void pqi_rescan_worker(struct work_struct *work)
1254 {
1255         struct pqi_ctrl_info *ctrl_info;
1256
1257         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1258                 rescan_work);
1259
1260         pqi_scan_scsi_devices(ctrl_info);
1261 }
1262
1263 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1264         struct pqi_scsi_dev *device)
1265 {
1266         int rc;
1267
1268         if (pqi_is_logical_device(device))
1269                 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1270                         device->target, device->lun);
1271         else
1272                 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1273
1274         return rc;
1275 }
1276
1277 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1278         struct pqi_scsi_dev *device)
1279 {
1280         if (pqi_is_logical_device(device))
1281                 scsi_remove_device(device->sdev);
1282         else
1283                 pqi_remove_sas_device(device);
1284 }
1285
1286 /* Assumes the SCSI device list lock is held. */
1287
1288 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1289         int bus, int target, int lun)
1290 {
1291         struct pqi_scsi_dev *device;
1292
1293         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1294                 scsi_device_list_entry)
1295                 if (device->bus == bus && device->target == target &&
1296                         device->lun == lun)
1297                         return device;
1298
1299         return NULL;
1300 }
1301
1302 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1303         struct pqi_scsi_dev *dev2)
1304 {
1305         if (dev1->is_physical_device != dev2->is_physical_device)
1306                 return false;
1307
1308         if (dev1->is_physical_device)
1309                 return dev1->wwid == dev2->wwid;
1310
1311         return memcmp(dev1->volume_id, dev2->volume_id,
1312                 sizeof(dev1->volume_id)) == 0;
1313 }
1314
1315 enum pqi_find_result {
1316         DEVICE_NOT_FOUND,
1317         DEVICE_CHANGED,
1318         DEVICE_SAME,
1319 };
1320
1321 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1322         struct pqi_scsi_dev *device_to_find,
1323         struct pqi_scsi_dev **matching_device)
1324 {
1325         struct pqi_scsi_dev *device;
1326
1327         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1328                 scsi_device_list_entry) {
1329                 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1330                         device->scsi3addr)) {
1331                         *matching_device = device;
1332                         if (pqi_device_equal(device_to_find, device)) {
1333                                 if (device_to_find->volume_offline)
1334                                         return DEVICE_CHANGED;
1335                                 return DEVICE_SAME;
1336                         }
1337                         return DEVICE_CHANGED;
1338                 }
1339         }
1340
1341         return DEVICE_NOT_FOUND;
1342 }
1343
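/*
 * Log one line describing a device.  In the flags portion of the
 * message, SSDSmartPathCap reflects offload_configured, En reflects the
 * pending offload enable, and Exp indicates whether the device is
 * exposed to the SCSI midlayer; '+' means set and '-' means clear.
 */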
1344 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1345         char *action, struct pqi_scsi_dev *device)
1346 {
1347         dev_info(&ctrl_info->pci_dev->dev,
1348                 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1349                 action,
1350                 ctrl_info->scsi_host->host_no,
1351                 device->bus,
1352                 device->target,
1353                 device->lun,
1354                 scsi_device_type(device->devtype),
1355                 device->vendor,
1356                 device->model,
1357                 pqi_raid_level_to_string(device->raid_level),
1358                 device->offload_configured ? '+' : '-',
1359                 device->offload_enabled_pending ? '+' : '-',
1360                 device->expose_device ? '+' : '-',
1361                 device->queue_depth);
1362 }
1363
1364 /* Assumes the SCSI device list lock is held. */
1365
1366 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1367         struct pqi_scsi_dev *new_device)
1368 {
1369         existing_device->devtype = new_device->devtype;
1370         existing_device->device_type = new_device->device_type;
1371         existing_device->bus = new_device->bus;
1372         if (new_device->target_lun_valid) {
1373                 existing_device->target = new_device->target;
1374                 existing_device->lun = new_device->lun;
1375                 existing_device->target_lun_valid = true;
1376         }
1377
1378         /* By definition, the scsi3addr and wwid fields are already the same. */
1379
1380         existing_device->is_physical_device = new_device->is_physical_device;
1381         existing_device->expose_device = new_device->expose_device;
1382         existing_device->no_uld_attach = new_device->no_uld_attach;
1383         existing_device->aio_enabled = new_device->aio_enabled;
1384         memcpy(existing_device->vendor, new_device->vendor,
1385                 sizeof(existing_device->vendor));
1386         memcpy(existing_device->model, new_device->model,
1387                 sizeof(existing_device->model));
1388         existing_device->sas_address = new_device->sas_address;
1389         existing_device->raid_level = new_device->raid_level;
1390         existing_device->queue_depth = new_device->queue_depth;
1391         existing_device->aio_handle = new_device->aio_handle;
1392         existing_device->volume_status = new_device->volume_status;
1393         existing_device->active_path_index = new_device->active_path_index;
1394         existing_device->path_map = new_device->path_map;
1395         existing_device->bay = new_device->bay;
1396         memcpy(existing_device->box, new_device->box,
1397                 sizeof(existing_device->box));
1398         memcpy(existing_device->phys_connector, new_device->phys_connector,
1399                 sizeof(existing_device->phys_connector));
1400         existing_device->offload_configured = new_device->offload_configured;
1401         existing_device->offload_enabled = false;
1402         existing_device->offload_enabled_pending =
1403                 new_device->offload_enabled_pending;
1404         existing_device->offload_to_mirror = 0;
1405         kfree(existing_device->raid_map);
1406         existing_device->raid_map = new_device->raid_map;
1407
1408         /* To prevent this from being freed later. */
1409         new_device->raid_map = NULL;
1410 }
1411
1412 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1413 {
1414         if (device) {
1415                 kfree(device->raid_map);
1416                 kfree(device);
1417         }
1418 }
1419
1420 /*
1421  * Called when exposing a new device to the OS fails in order to re-adjust
1422  * our internal SCSI device list to match the SCSI ML's view.
1423  */
1424
1425 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1426         struct pqi_scsi_dev *device)
1427 {
1428         unsigned long flags;
1429
1430         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1431         list_del(&device->scsi_device_list_entry);
1432         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1433
1434         /* Allow the device structure to be freed later. */
1435         device->keep_device = false;
1436 }
1437
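/*
 * Reconcile the driver's internal device list with a freshly scanned
 * list.  Under the device list spinlock, every existing device is first
 * presumed gone, then each newly scanned device is classified as SAME,
 * CHANGED, or NOT_FOUND against the existing list.  Removing departed
 * devices and exposing new ones to the SCSI midlayer are deferred until
 * after the lock is dropped.
 */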
1438 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1439         struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1440 {
1441         int rc;
1442         unsigned int i;
1443         unsigned long flags;
1444         enum pqi_find_result find_result;
1445         struct pqi_scsi_dev *device;
1446         struct pqi_scsi_dev *next;
1447         struct pqi_scsi_dev *matching_device;
1448         struct list_head add_list;
1449         struct list_head delete_list;
1450
1451         INIT_LIST_HEAD(&add_list);
1452         INIT_LIST_HEAD(&delete_list);
1453
1454         /*
1455          * The idea here is to do as little work as possible while holding the
1456          * spinlock.  That's why we go to great pains to defer anything other
1457          * than updating the internal device list until after we release the
1458          * spinlock.
1459          */
1460
1461         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1462
1463         /* Assume that all devices in the existing list have gone away. */
1464         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1465                 scsi_device_list_entry)
1466                 device->device_gone = true;
1467
1468         for (i = 0; i < num_new_devices; i++) {
1469                 device = new_device_list[i];
1470
1471                 find_result = pqi_scsi_find_entry(ctrl_info, device,
1472                                                 &matching_device);
1473
1474                 switch (find_result) {
1475                 case DEVICE_SAME:
1476                         /*
1477                          * The newly found device is already in the existing
1478                          * device list.
1479                          */
1480                         device->new_device = false;
1481                         matching_device->device_gone = false;
1482                         pqi_scsi_update_device(matching_device, device);
1483                         break;
1484                 case DEVICE_NOT_FOUND:
1485                         /*
1486                          * The newly found device is NOT in the existing device
1487                          * list.
1488                          */
1489                         device->new_device = true;
1490                         break;
1491                 case DEVICE_CHANGED:
1492                         /*
1493                          * The original device has gone away and we need to add
1494                          * the new device.
1495                          */
1496                         device->new_device = true;
1497                         break;
1498                 default:
1499                         WARN_ON(find_result);
1500                         break;
1501                 }
1502         }
1503
1504         /* Process all devices that have gone away. */
1505         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1506                 scsi_device_list_entry) {
1507                 if (device->device_gone) {
1508                         list_del(&device->scsi_device_list_entry);
1509                         list_add_tail(&device->delete_list_entry, &delete_list);
1510                 }
1511         }
1512
1513         /* Process all new devices. */
1514         for (i = 0; i < num_new_devices; i++) {
1515                 device = new_device_list[i];
1516                 if (!device->new_device)
1517                         continue;
1518                 if (device->volume_offline)
1519                         continue;
1520                 list_add_tail(&device->scsi_device_list_entry,
1521                         &ctrl_info->scsi_device_list);
1522                 list_add_tail(&device->add_list_entry, &add_list);
1523                 /* Prevent this device structure from being freed later. */
1524                 device->keep_device = true;
1525         }
1526
1527         pqi_update_all_logical_drive_queue_depths(ctrl_info);
1528
1529         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1530                 scsi_device_list_entry)
1531                 device->offload_enabled =
1532                         device->offload_enabled_pending;
1533
1534         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1535
1536         /* Remove all devices that have gone away. */
1537         list_for_each_entry_safe(device, next, &delete_list,
1538                 delete_list_entry) {
1539                 if (device->sdev)
1540                         pqi_remove_device(ctrl_info, device);
1541                 if (device->volume_offline) {
1542                         pqi_dev_info(ctrl_info, "offline", device);
1543                         pqi_show_volume_status(ctrl_info, device);
1544                 } else {
1545                         pqi_dev_info(ctrl_info, "removed", device);
1546                 }
1547                 list_del(&device->delete_list_entry);
1548                 pqi_free_device(device);
1549         }
1550
1551         /*
1552          * Notify the SCSI ML if the queue depth of any existing device has
1553          * changed.
1554          */
1555         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1556                 scsi_device_list_entry) {
1557                 if (device->sdev && device->queue_depth !=
1558                         device->advertised_queue_depth) {
1559                         device->advertised_queue_depth = device->queue_depth;
1560                         scsi_change_queue_depth(device->sdev,
1561                                 device->advertised_queue_depth);
1562                 }
1563         }
1564
1565         /* Expose any new devices. */
1566         list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1567                 if (device->expose_device && !device->sdev) {
1568                         rc = pqi_add_device(ctrl_info, device);
1569                         if (rc) {
1570                                 dev_warn(&ctrl_info->pci_dev->dev,
1571                                         "scsi %d:%d:%d:%d addition failed, device not added\n",
1572                                         ctrl_info->scsi_host->host_no,
1573                                         device->bus, device->target,
1574                                         device->lun);
1575                                 pqi_fixup_botched_add(ctrl_info, device);
1576                                 continue;
1577                         }
1578                 }
1579                 pqi_dev_info(ctrl_info, "added", device);
1580         }
1581 }
1582
1583 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1584 {
1585         bool is_supported = false;
1586
1587         switch (device->devtype) {
1588         case TYPE_DISK:
1589         case TYPE_ZBC:
1590         case TYPE_TAPE:
1591         case TYPE_MEDIUM_CHANGER:
1592         case TYPE_ENCLOSURE:
1593                 is_supported = true;
1594                 break;
1595         case TYPE_RAID:
1596                 /*
1597                  * Only support the HBA controller itself as a RAID
1598                  * controller.  If it's a RAID controller other than
1599                  * the HBA itself (an external RAID controller, MSA500
1600                  * or similar), we don't support it.
1601                  */
1602                 if (pqi_is_hba_lunid(device->scsi3addr))
1603                         is_supported = true;
1604                 break;
1605         }
1606
1607         return is_supported;
1608 }
1609
1610 static inline bool pqi_skip_device(u8 *scsi3addr,
1611         struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1612 {
1613         u8 device_flags;
1614
1615         if (!MASKED_DEVICE(scsi3addr))
1616                 return false;
1617
1618         /* The device is masked. */
1619
1620         device_flags = phys_lun_ext_entry->device_flags;
1621
1622         if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1623                 /*
1624                  * It's a non-disk device.  We ignore all devices of this type
1625                  * when they're masked.
1626                  */
1627                 return true;
1628         }
1629
1630         return false;
1631 }
1632
1633 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1634 {
1635         /* Expose all devices except for physical devices that are masked. */
1636         if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1637                 return false;
1638
1639         return true;
1640 }
1641
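/*
 * Rebuild the driver's view of attached devices: fetch the physical and
 * logical LUN lists from the controller, allocate a pqi_scsi_dev for each
 * reported entry, fill in per-device details, and hand the surviving entries
 * to pqi_update_device_list() for reconciliation with the current list.
 */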
1642 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1643 {
1644         int i;
1645         int rc;
1646         struct list_head new_device_list_head;
1647         struct report_phys_lun_extended *physdev_list = NULL;
1648         struct report_log_lun_extended *logdev_list = NULL;
1649         struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1650         struct report_log_lun_extended_entry *log_lun_ext_entry;
1651         struct bmic_identify_physical_device *id_phys = NULL;
1652         u32 num_physicals;
1653         u32 num_logicals;
1654         struct pqi_scsi_dev **new_device_list = NULL;
1655         struct pqi_scsi_dev *device;
1656         struct pqi_scsi_dev *next;
1657         unsigned int num_new_devices;
1658         unsigned int num_valid_devices;
1659         bool is_physical_device;
1660         u8 *scsi3addr;
1661         static char *out_of_memory_msg =
1662                 "out of memory, device discovery stopped";
1663
1664         INIT_LIST_HEAD(&new_device_list_head);
1665
1666         rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1667         if (rc)
1668                 goto out;
1669
1670         if (physdev_list)
1671                 num_physicals =
1672                         get_unaligned_be32(&physdev_list->header.list_length)
1673                                 / sizeof(physdev_list->lun_entries[0]);
1674         else
1675                 num_physicals = 0;
1676
1677         if (logdev_list)
1678                 num_logicals =
1679                         get_unaligned_be32(&logdev_list->header.list_length)
1680                                 / sizeof(logdev_list->lun_entries[0]);
1681         else
1682                 num_logicals = 0;
1683
1684         if (num_physicals) {
1685                 /*
1686                  * We need this buffer for calls to pqi_get_physical_disk_info()
1687                  * below.  We allocate it here instead of inside
1688                  * pqi_get_physical_disk_info() because it's a fairly large
1689                  * buffer.
1690                  */
1691                 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1692                 if (!id_phys) {
1693                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1694                                 out_of_memory_msg);
1695                         rc = -ENOMEM;
1696                         goto out;
1697                 }
1698         }
1699
1700         num_new_devices = num_physicals + num_logicals;
1701
1702         new_device_list = kmalloc_array(num_new_devices,
1703                 sizeof(*new_device_list), GFP_KERNEL);
1704         if (!new_device_list) {
1705                 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1706                 rc = -ENOMEM;
1707                 goto out;
1708         }
1709
1710         for (i = 0; i < num_new_devices; i++) {
1711                 device = kzalloc(sizeof(*device), GFP_KERNEL);
1712                 if (!device) {
1713                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1714                                 out_of_memory_msg);
1715                         rc = -ENOMEM;
1716                         goto out;
1717                 }
1718                 list_add_tail(&device->new_device_list_entry,
1719                         &new_device_list_head);
1720         }
1721
1722         device = NULL;
1723         num_valid_devices = 0;
1724
1725         for (i = 0; i < num_new_devices; i++) {
1726
1727                 if (i < num_physicals) {
1728                         is_physical_device = true;
1729                         phys_lun_ext_entry = &physdev_list->lun_entries[i];
1730                         log_lun_ext_entry = NULL;
1731                         scsi3addr = phys_lun_ext_entry->lunid;
1732                 } else {
1733                         is_physical_device = false;
1734                         phys_lun_ext_entry = NULL;
1735                         log_lun_ext_entry =
1736                                 &logdev_list->lun_entries[i - num_physicals];
1737                         scsi3addr = log_lun_ext_entry->lunid;
1738                 }
1739
1740                 if (is_physical_device &&
1741                         pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1742                         continue;
1743
1744                 if (device)
1745                         device = list_next_entry(device, new_device_list_entry);
1746                 else
1747                         device = list_first_entry(&new_device_list_head,
1748                                 struct pqi_scsi_dev, new_device_list_entry);
1749
1750                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1751                 device->is_physical_device = is_physical_device;
1752                 device->raid_level = SA_RAID_UNKNOWN;
1753
1754                 /* Gather information about the device. */
1755                 rc = pqi_get_device_info(ctrl_info, device);
1756                 if (rc == -ENOMEM) {
1757                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1758                                 out_of_memory_msg);
1759                         goto out;
1760                 }
1761                 if (rc) {
1762                         dev_warn(&ctrl_info->pci_dev->dev,
1763                                 "obtaining device info failed, skipping device %016llx\n",
1764                                 get_unaligned_be64(device->scsi3addr));
1765                         rc = 0;
1766                         continue;
1767                 }
1768
1769                 if (!pqi_is_supported_device(device))
1770                         continue;
1771
1772                 pqi_assign_bus_target_lun(device);
1773
1774                 device->expose_device = pqi_expose_device(device);
1775
1776                 if (device->is_physical_device) {
1777                         device->wwid = phys_lun_ext_entry->wwid;
1778                         if ((phys_lun_ext_entry->device_flags &
1779                                 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1780                                 phys_lun_ext_entry->aio_handle)
1781                                 device->aio_enabled = true;
1782                 } else {
1783                         memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1784                                 sizeof(device->volume_id));
1785                 }
1786
1787                 switch (device->devtype) {
1788                 case TYPE_DISK:
1789                 case TYPE_ZBC:
1790                 case TYPE_ENCLOSURE:
1791                         if (device->is_physical_device) {
1792                                 device->sas_address =
1793                                         get_unaligned_be64(&device->wwid);
1794                                 if (device->devtype == TYPE_DISK ||
1795                                         device->devtype == TYPE_ZBC) {
1796                                         device->aio_handle =
1797                                                 phys_lun_ext_entry->aio_handle;
1798                                         pqi_get_physical_disk_info(ctrl_info,
1799                                                 device, id_phys);
1800                                 }
1801                         }
1802                         break;
1803                 }
1804
1805                 new_device_list[num_valid_devices++] = device;
1806         }
1807
1808         pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1809
1810 out:
1811         list_for_each_entry_safe(device, next, &new_device_list_head,
1812                 new_device_list_entry) {
1813                 if (device->keep_device)
1814                         continue;
1815                 list_del(&device->new_device_list_entry);
1816                 pqi_free_device(device);
1817         }
1818
1819         kfree(new_device_list);
1820         kfree(physdev_list);
1821         kfree(logdev_list);
1822         kfree(id_phys);
1823
1824         return rc;
1825 }
1826
1827 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1828 {
1829         unsigned long flags;
1830         struct pqi_scsi_dev *device;
1831         struct pqi_scsi_dev *next;
1832
1833         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1834
1835         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1836                 scsi_device_list_entry) {
1837                 if (device->sdev)
1838                         pqi_remove_device(ctrl_info, device);
1839                 list_del(&device->scsi_device_list_entry);
1840                 pqi_free_device(device);
1841         }
1842
1843         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1844 }
1845
1846 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1847 {
1848         int rc;
1849
1850         if (pqi_ctrl_offline(ctrl_info))
1851                 return -ENXIO;
1852
1853         mutex_lock(&ctrl_info->scan_mutex);
1854
1855         rc = pqi_update_scsi_devices(ctrl_info);
1856         if (rc)
1857                 pqi_schedule_rescan_worker(ctrl_info);
1858
1859         mutex_unlock(&ctrl_info->scan_mutex);
1860
1861         return rc;
1862 }
1863
1864 static void pqi_scan_start(struct Scsi_Host *shost)
1865 {
1866         pqi_scan_scsi_devices(shost_to_hba(shost));
1867 }
1868
1869 /* Returns TRUE if scan is finished. */
1870
1871 static int pqi_scan_finished(struct Scsi_Host *shost,
1872         unsigned long elapsed_time)
1873 {
1874         struct pqi_ctrl_info *ctrl_info;
1875
1876         ctrl_info = shost_priv(shost);
1877
1878         return !mutex_is_locked(&ctrl_info->scan_mutex);
1879 }
1880
1881 static inline void pqi_set_encryption_info(
1882         struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1883         u64 first_block)
1884 {
1885         u32 volume_blk_size;
1886
1887         /*
1888          * Set the encryption tweak values based on logical block address.
1889          * If the block size is 512, the tweak value is equal to the LBA.
1890          * For other block sizes, the tweak value is (LBA * block size) / 512.
1891          */
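        /*
         * Illustrative example: with a 4096-byte volume block size, LBA 100
         * yields a tweak of (100 * 4096) / 512 = 800; with a 512-byte block
         * size the tweak is simply the LBA itself.
         */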
1892         volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1893         if (volume_blk_size != 512)
1894                 first_block = (first_block * volume_blk_size) / 512;
1895
1896         encryption_info->data_encryption_key_index =
1897                 get_unaligned_le16(&raid_map->data_encryption_key_index);
1898         encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1899         encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1900 }
1901
1902 /*
1903  * Attempt to perform offload RAID mapping for a logical volume I/O.
1904  */
1905
1906 #define PQI_RAID_BYPASS_INELIGIBLE      1
1907
1908 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1909         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1910         struct pqi_queue_group *queue_group)
1911 {
1912         struct raid_map *raid_map;
1913         bool is_write = false;
1914         u32 map_index;
1915         u64 first_block;
1916         u64 last_block;
1917         u32 block_cnt;
1918         u32 blocks_per_row;
1919         u64 first_row;
1920         u64 last_row;
1921         u32 first_row_offset;
1922         u32 last_row_offset;
1923         u32 first_column;
1924         u32 last_column;
1925         u64 r0_first_row;
1926         u64 r0_last_row;
1927         u32 r5or6_blocks_per_row;
1928         u64 r5or6_first_row;
1929         u64 r5or6_last_row;
1930         u32 r5or6_first_row_offset;
1931         u32 r5or6_last_row_offset;
1932         u32 r5or6_first_column;
1933         u32 r5or6_last_column;
1934         u16 data_disks_per_row;
1935         u32 total_disks_per_row;
1936         u16 layout_map_count;
1937         u32 stripesize;
1938         u16 strip_size;
1939         u32 first_group;
1940         u32 last_group;
1941         u32 current_group;
1942         u32 map_row;
1943         u32 aio_handle;
1944         u64 disk_block;
1945         u32 disk_block_cnt;
1946         u8 cdb[16];
1947         u8 cdb_length;
1948         int offload_to_mirror;
1949         struct pqi_encryption_info *encryption_info_ptr;
1950         struct pqi_encryption_info encryption_info;
1951 #if BITS_PER_LONG == 32
1952         u64 tmpdiv;
1953 #endif
1954
1955         /* Check for valid opcode, get LBA and block count. */
1956         switch (scmd->cmnd[0]) {
1957         case WRITE_6:
1958                 is_write = true;
1959                 /* fall through */
1960         case READ_6:
1961                 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
1962                         (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
1963                 block_cnt = (u32)scmd->cmnd[4];
1964                 if (block_cnt == 0)
1965                         block_cnt = 256;
1966                 break;
1967         case WRITE_10:
1968                 is_write = true;
1969                 /* fall through */
1970         case READ_10:
1971                 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1972                 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1973                 break;
1974         case WRITE_12:
1975                 is_write = true;
1976                 /* fall through */
1977         case READ_12:
1978                 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1979                 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1980                 break;
1981         case WRITE_16:
1982                 is_write = true;
1983                 /* fall through */
1984         case READ_16:
1985                 first_block = get_unaligned_be64(&scmd->cmnd[2]);
1986                 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1987                 break;
1988         default:
1989                 /* Process via normal I/O path. */
1990                 return PQI_RAID_BYPASS_INELIGIBLE;
1991         }
1992
1993         /* Check for write to non-RAID-0. */
1994         if (is_write && device->raid_level != SA_RAID_0)
1995                 return PQI_RAID_BYPASS_INELIGIBLE;
1996
1997         if (unlikely(block_cnt == 0))
1998                 return PQI_RAID_BYPASS_INELIGIBLE;
1999
2000         last_block = first_block + block_cnt - 1;
2001         raid_map = device->raid_map;
2002
2003         /* Check for invalid block or wraparound. */
2004         if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2005                 last_block < first_block)
2006                 return PQI_RAID_BYPASS_INELIGIBLE;
2007
2008         data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2009         strip_size = get_unaligned_le16(&raid_map->strip_size);
2010         layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2011
2012         /* Calculate stripe information for the request. */
2013         blocks_per_row = data_disks_per_row * strip_size;
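        /*
         * Illustrative example: with strip_size = 128 and
         * data_disks_per_row = 4, blocks_per_row is 512, so first_block 1000
         * lands in row 1 (1000 / 512), at row offset 488, in column 3
         * (488 / 128).
         */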
2014 #if BITS_PER_LONG == 32
2015         tmpdiv = first_block;
2016         do_div(tmpdiv, blocks_per_row);
2017         first_row = tmpdiv;
2018         tmpdiv = last_block;
2019         do_div(tmpdiv, blocks_per_row);
2020         last_row = tmpdiv;
2021         first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2022         last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2023         tmpdiv = first_row_offset;
2024         do_div(tmpdiv, strip_size);
2025         first_column = tmpdiv;
2026         tmpdiv = last_row_offset;
2027         do_div(tmpdiv, strip_size);
2028         last_column = tmpdiv;
2029 #else
2030         first_row = first_block / blocks_per_row;
2031         last_row = last_block / blocks_per_row;
2032         first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2033         last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2034         first_column = first_row_offset / strip_size;
2035         last_column = last_row_offset / strip_size;
2036 #endif
2037
2038         /* If this isn't a single row/column request, let the controller handle it. */
2039         if (first_row != last_row || first_column != last_column)
2040                 return PQI_RAID_BYPASS_INELIGIBLE;
2041
2042         /* Proceeding with driver mapping. */
2043         total_disks_per_row = data_disks_per_row +
2044                 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2045         map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2046                 get_unaligned_le16(&raid_map->row_cnt);
2047         map_index = (map_row * total_disks_per_row) + first_column;
2048
2049         /* RAID 1 */
2050         if (device->raid_level == SA_RAID_1) {
2051                 if (device->offload_to_mirror)
2052                         map_index += data_disks_per_row;
2053                 device->offload_to_mirror = !device->offload_to_mirror;
2054         } else if (device->raid_level == SA_RAID_ADM) {
2055                 /* RAID ADM */
2056                 /*
2057                  * Handles N-way mirrors (R1-ADM) and R10 with a number of
2058                  * drives divisible by 3.
2059                  */
2060                 offload_to_mirror = device->offload_to_mirror;
2061                 if (offload_to_mirror == 0)  {
2062                         /* Use the physical disk in the first mirrored group. */
2063                         map_index %= data_disks_per_row;
2064                 } else {
2065                         do {
2066                                 /*
2067                                  * Determine mirror group that map_index
2068                                  * indicates.
2069                                  */
2070                                 current_group = map_index / data_disks_per_row;
2071
2072                                 if (offload_to_mirror != current_group) {
2073                                         if (current_group <
2074                                                 layout_map_count - 1) {
2075                                                 /*
2076                                                  * Select raid index from
2077                                                  * next group.
2078                                                  */
2079                                                 map_index += data_disks_per_row;
2080                                                 current_group++;
2081                                         } else {
2082                                                 /*
2083                                                  * Select raid index from first
2084                                                  * group.
2085                                                  */
2086                                                 map_index %= data_disks_per_row;
2087                                                 current_group = 0;
2088                                         }
2089                                 }
2090                         } while (offload_to_mirror != current_group);
2091                 }
2092
2093                 /* Set mirror group to use next time. */
2094                 offload_to_mirror =
2095                         (offload_to_mirror >= layout_map_count - 1) ?
2096                                 0 : offload_to_mirror + 1;
2097                 WARN_ON(offload_to_mirror >= layout_map_count);
2098                 device->offload_to_mirror = offload_to_mirror;
2099                 /*
2100                  * Avoid direct use of device->offload_to_mirror within this
2101                  * function, since multiple threads might simultaneously
2102                  * increment it beyond layout_map_count - 1.
2103                  */
2104         } else if ((device->raid_level == SA_RAID_5 ||
2105                 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2106                 /* RAID 50/60 */
2107                 /* Verify first and last block are in same RAID group */
2108                 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2109                 stripesize = r5or6_blocks_per_row * layout_map_count;
2110 #if BITS_PER_LONG == 32
2111                 tmpdiv = first_block;
2112                 first_group = do_div(tmpdiv, stripesize);
2113                 tmpdiv = first_group;
2114                 do_div(tmpdiv, r5or6_blocks_per_row);
2115                 first_group = tmpdiv;
2116                 tmpdiv = last_block;
2117                 last_group = do_div(tmpdiv, stripesize);
2118                 tmpdiv = last_group;
2119                 do_div(tmpdiv, r5or6_blocks_per_row);
2120                 last_group = tmpdiv;
2121 #else
2122                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2123                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2124 #endif
2125                 if (first_group != last_group)
2126                         return PQI_RAID_BYPASS_INELIGIBLE;
2127
2128                 /* Verify request is in a single row of RAID 5/6 */
2129 #if BITS_PER_LONG == 32
2130                 tmpdiv = first_block;
2131                 do_div(tmpdiv, stripesize);
2132                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2133                 tmpdiv = last_block;
2134                 do_div(tmpdiv, stripesize);
2135                 r5or6_last_row = r0_last_row = tmpdiv;
2136 #else
2137                 first_row = r5or6_first_row = r0_first_row =
2138                         first_block / stripesize;
2139                 r5or6_last_row = r0_last_row = last_block / stripesize;
2140 #endif
2141                 if (r5or6_first_row != r5or6_last_row)
2142                         return PQI_RAID_BYPASS_INELIGIBLE;
2143
2144                 /* Verify request is in a single column */
2145 #if BITS_PER_LONG == 32
2146                 tmpdiv = first_block;
2147                 first_row_offset = do_div(tmpdiv, stripesize);
2148                 tmpdiv = first_row_offset;
2149                 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2150                 r5or6_first_row_offset = first_row_offset;
2151                 tmpdiv = last_block;
2152                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2153                 tmpdiv = r5or6_last_row_offset;
2154                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2155                 tmpdiv = r5or6_first_row_offset;
2156                 do_div(tmpdiv, strip_size);
2157                 first_column = r5or6_first_column = tmpdiv;
2158                 tmpdiv = r5or6_last_row_offset;
2159                 do_div(tmpdiv, strip_size);
2160                 r5or6_last_column = tmpdiv;
2161 #else
2162                 first_row_offset = r5or6_first_row_offset =
2163                         (u32)((first_block % stripesize) %
2164                         r5or6_blocks_per_row);
2165
2166                 r5or6_last_row_offset =
2167                         (u32)((last_block % stripesize) %
2168                         r5or6_blocks_per_row);
2169
2170                 first_column = r5or6_first_row_offset / strip_size;
2171                 r5or6_first_column = first_column;
2172                 r5or6_last_column = r5or6_last_row_offset / strip_size;
2173 #endif
2174                 if (r5or6_first_column != r5or6_last_column)
2175                         return PQI_RAID_BYPASS_INELIGIBLE;
2176
2177                 /* Request is eligible */
2178                 map_row =
2179                         ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2180                         get_unaligned_le16(&raid_map->row_cnt);
2181
2182                 map_index = (first_group *
2183                         (get_unaligned_le16(&raid_map->row_cnt) *
2184                         total_disks_per_row)) +
2185                         (map_row * total_disks_per_row) + first_column;
2186         }
2187
2188         if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2189                 return PQI_RAID_BYPASS_INELIGIBLE;
2190
2191         aio_handle = raid_map->disk_data[map_index].aio_handle;
2192         disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2193                 first_row * strip_size +
2194                 (first_row_offset - first_column * strip_size);
2195         disk_block_cnt = block_cnt;
2196
2197         /* Handle differing logical/physical block sizes. */
2198         if (raid_map->phys_blk_shift) {
2199                 disk_block <<= raid_map->phys_blk_shift;
2200                 disk_block_cnt <<= raid_map->phys_blk_shift;
2201         }
2202
2203         if (unlikely(disk_block_cnt > 0xffff))
2204                 return PQI_RAID_BYPASS_INELIGIBLE;
2205
2206         /* Build the new CDB for the physical disk I/O. */
2207         if (disk_block > 0xffffffff) {
2208                 cdb[0] = is_write ? WRITE_16 : READ_16;
2209                 cdb[1] = 0;
2210                 put_unaligned_be64(disk_block, &cdb[2]);
2211                 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2212                 cdb[14] = 0;
2213                 cdb[15] = 0;
2214                 cdb_length = 16;
2215         } else {
2216                 cdb[0] = is_write ? WRITE_10 : READ_10;
2217                 cdb[1] = 0;
2218                 put_unaligned_be32((u32)disk_block, &cdb[2]);
2219                 cdb[6] = 0;
2220                 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2221                 cdb[9] = 0;
2222                 cdb_length = 10;
2223         }
2224
2225         if (get_unaligned_le16(&raid_map->flags) &
2226                 RAID_MAP_ENCRYPTION_ENABLED) {
2227                 pqi_set_encryption_info(&encryption_info, raid_map,
2228                         first_block);
2229                 encryption_info_ptr = &encryption_info;
2230         } else {
2231                 encryption_info_ptr = NULL;
2232         }
2233
2234         return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2235                 cdb, cdb_length, queue_group, encryption_info_ptr);
2236 }
2237
2238 #define PQI_STATUS_IDLE         0x0
2239
2240 #define PQI_CREATE_ADMIN_QUEUE_PAIR     1
2241 #define PQI_DELETE_ADMIN_QUEUE_PAIR     2
2242
2243 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET             0x0
2244 #define PQI_DEVICE_STATE_STATUS_AVAILABLE               0x1
2245 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY            0x2
2246 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY         0x3
2247 #define PQI_DEVICE_STATE_ERROR                          0x4
2248
2249 #define PQI_MODE_READY_TIMEOUT_SECS             30
2250 #define PQI_MODE_READY_POLL_INTERVAL_MSECS      1
2251
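/*
 * Wait for the controller to enter PQI mode: poll, in order, for the PQI
 * signature, for the function and status code to report IDLE, and for the
 * device status to reach "all registers ready".  All three stages share a
 * single PQI_MODE_READY_TIMEOUT_SECS deadline.
 */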
2252 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2253 {
2254         struct pqi_device_registers __iomem *pqi_registers;
2255         unsigned long timeout;
2256         u64 signature;
2257         u8 status;
2258
2259         pqi_registers = ctrl_info->pqi_registers;
2260         timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2261
2262         while (1) {
2263                 signature = readq(&pqi_registers->signature);
2264                 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2265                         sizeof(signature)) == 0)
2266                         break;
2267                 if (time_after(jiffies, timeout)) {
2268                         dev_err(&ctrl_info->pci_dev->dev,
2269                                 "timed out waiting for PQI signature\n");
2270                         return -ETIMEDOUT;
2271                 }
2272                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2273         }
2274
2275         while (1) {
2276                 status = readb(&pqi_registers->function_and_status_code);
2277                 if (status == PQI_STATUS_IDLE)
2278                         break;
2279                 if (time_after(jiffies, timeout)) {
2280                         dev_err(&ctrl_info->pci_dev->dev,
2281                                 "timed out waiting for PQI IDLE\n");
2282                         return -ETIMEDOUT;
2283                 }
2284                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2285         }
2286
2287         while (1) {
2288                 if (readl(&pqi_registers->device_status) ==
2289                         PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2290                         break;
2291                 if (time_after(jiffies, timeout)) {
2292                         dev_err(&ctrl_info->pci_dev->dev,
2293                                 "timed out waiting for PQI all registers ready\n");
2294                         return -ETIMEDOUT;
2295                 }
2296                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2297         }
2298
2299         return 0;
2300 }
2301
2302 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2303 {
2304         struct pqi_scsi_dev *device;
2305
2306         device = io_request->scmd->device->hostdata;
2307         device->offload_enabled = false;
2308 }
2309
2310 static inline void pqi_take_device_offline(struct scsi_device *sdev)
2311 {
2312         struct pqi_ctrl_info *ctrl_info;
2313         struct pqi_scsi_dev *device;
2314
2315         if (scsi_device_online(sdev)) {
2316                 scsi_device_set_state(sdev, SDEV_OFFLINE);
2317                 ctrl_info = shost_to_hba(sdev->host);
2318                 schedule_delayed_work(&ctrl_info->rescan_work, 0);
2319                 device = sdev->hostdata;
2320                 dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2321                         ctrl_info->scsi_host->host_no, device->bus,
2322                         device->target, device->lun);
2323         }
2324 }
2325
2326 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2327 {
2328         u8 scsi_status;
2329         u8 host_byte;
2330         struct scsi_cmnd *scmd;
2331         struct pqi_raid_error_info *error_info;
2332         size_t sense_data_length;
2333         int residual_count;
2334         int xfer_count;
2335         struct scsi_sense_hdr sshdr;
2336
2337         scmd = io_request->scmd;
2338         if (!scmd)
2339                 return;
2340
2341         error_info = io_request->error_info;
2342         scsi_status = error_info->status;
2343         host_byte = DID_OK;
2344
2345         if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2346                 xfer_count =
2347                         get_unaligned_le32(&error_info->data_out_transferred);
2348                 residual_count = scsi_bufflen(scmd) - xfer_count;
2349                 scsi_set_resid(scmd, residual_count);
2350                 if (xfer_count < scmd->underflow)
2351                         host_byte = DID_SOFT_ERROR;
2352         }
2353
2354         sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2355         if (sense_data_length == 0)
2356                 sense_data_length =
2357                         get_unaligned_le16(&error_info->response_data_length);
2358         if (sense_data_length) {
2359                 if (sense_data_length > sizeof(error_info->data))
2360                         sense_data_length = sizeof(error_info->data);
2361
2362                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2363                         scsi_normalize_sense(error_info->data,
2364                                 sense_data_length, &sshdr) &&
2365                                 sshdr.sense_key == HARDWARE_ERROR &&
2366                                 sshdr.asc == 0x3e &&
2367                                 sshdr.ascq == 0x1) {
2368                         pqi_take_device_offline(scmd->device);
2369                         host_byte = DID_NO_CONNECT;
2370                 }
2371
2372                 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2373                         sense_data_length = SCSI_SENSE_BUFFERSIZE;
2374                 memcpy(scmd->sense_buffer, error_info->data,
2375                         sense_data_length);
2376         }
2377
2378         scmd->result = scsi_status;
2379         set_host_byte(scmd, host_byte);
2380 }
2381
2382 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2383 {
2384         u8 scsi_status;
2385         u8 host_byte;
2386         struct scsi_cmnd *scmd;
2387         struct pqi_aio_error_info *error_info;
2388         size_t sense_data_length;
2389         int residual_count;
2390         int xfer_count;
2391         bool device_offline;
2392
2393         scmd = io_request->scmd;
2394         error_info = io_request->error_info;
2395         host_byte = DID_OK;
2396         sense_data_length = 0;
2397         device_offline = false;
2398
2399         switch (error_info->service_response) {
2400         case PQI_AIO_SERV_RESPONSE_COMPLETE:
2401                 scsi_status = error_info->status;
2402                 break;
2403         case PQI_AIO_SERV_RESPONSE_FAILURE:
2404                 switch (error_info->status) {
2405                 case PQI_AIO_STATUS_IO_ABORTED:
2406                         scsi_status = SAM_STAT_TASK_ABORTED;
2407                         break;
2408                 case PQI_AIO_STATUS_UNDERRUN:
2409                         scsi_status = SAM_STAT_GOOD;
2410                         residual_count = get_unaligned_le32(
2411                                                 &error_info->residual_count);
2412                         scsi_set_resid(scmd, residual_count);
2413                         xfer_count = scsi_bufflen(scmd) - residual_count;
2414                         if (xfer_count < scmd->underflow)
2415                                 host_byte = DID_SOFT_ERROR;
2416                         break;
2417                 case PQI_AIO_STATUS_OVERRUN:
2418                         scsi_status = SAM_STAT_GOOD;
2419                         break;
2420                 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2421                         pqi_aio_path_disabled(io_request);
2422                         scsi_status = SAM_STAT_GOOD;
2423                         io_request->status = -EAGAIN;
2424                         break;
2425                 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2426                 case PQI_AIO_STATUS_INVALID_DEVICE:
2427                         device_offline = true;
2428                         pqi_take_device_offline(scmd->device);
2429                         host_byte = DID_NO_CONNECT;
2430                         scsi_status = SAM_STAT_CHECK_CONDITION;
2431                         break;
2432                 case PQI_AIO_STATUS_IO_ERROR:
2433                 default:
2434                         scsi_status = SAM_STAT_CHECK_CONDITION;
2435                         break;
2436                 }
2437                 break;
2438         case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2439         case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2440                 scsi_status = SAM_STAT_GOOD;
2441                 break;
2442         case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2443         case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2444         default:
2445                 scsi_status = SAM_STAT_CHECK_CONDITION;
2446                 break;
2447         }
2448
2449         if (error_info->data_present) {
2450                 sense_data_length =
2451                         get_unaligned_le16(&error_info->data_length);
2452                 if (sense_data_length) {
2453                         if (sense_data_length > sizeof(error_info->data))
2454                                 sense_data_length = sizeof(error_info->data);
2455                         if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2456                                 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2457                         memcpy(scmd->sense_buffer, error_info->data,
2458                                 sense_data_length);
2459                 }
2460         }
2461
2462         if (device_offline && sense_data_length == 0)
2463                 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2464                         0x3e, 0x1);
2465
2466         scmd->result = scsi_status;
2467         set_host_byte(scmd, host_byte);
2468 }
2469
2470 static void pqi_process_io_error(unsigned int iu_type,
2471         struct pqi_io_request *io_request)
2472 {
2473         switch (iu_type) {
2474         case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2475                 pqi_process_raid_io_error(io_request);
2476                 break;
2477         case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2478                 pqi_process_aio_io_error(io_request);
2479                 break;
2480         }
2481 }
2482
2483 static int pqi_interpret_task_management_response(
2484         struct pqi_task_management_response *response)
2485 {
2486         int rc;
2487
2488         switch (response->response_code) {
2489         case SOP_TMF_COMPLETE:
2490         case SOP_TMF_FUNCTION_SUCCEEDED:
2491                 rc = 0;
2492                 break;
2493         default:
2494                 rc = -EIO;
2495                 break;
2496         }
2497
2498         return rc;
2499 }
2500
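/*
 * Drain one outbound (response) queue: walk from the cached consumer index
 * to the producer index posted by the controller, match each response to its
 * pqi_io_request via request_id, run the completion callback, and finally
 * publish the updated consumer index back to the controller.
 */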
2501 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2502         struct pqi_queue_group *queue_group)
2503 {
2504         unsigned int num_responses;
2505         pqi_index_t oq_pi;
2506         pqi_index_t oq_ci;
2507         struct pqi_io_request *io_request;
2508         struct pqi_io_response *response;
2509         u16 request_id;
2510
2511         num_responses = 0;
2512         oq_ci = queue_group->oq_ci_copy;
2513
2514         while (1) {
2515                 oq_pi = *queue_group->oq_pi;
2516                 if (oq_pi == oq_ci)
2517                         break;
2518
2519                 num_responses++;
2520                 response = queue_group->oq_element_array +
2521                         (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2522
2523                 request_id = get_unaligned_le16(&response->request_id);
2524                 WARN_ON(request_id >= ctrl_info->max_io_slots);
2525
2526                 io_request = &ctrl_info->io_request_pool[request_id];
2527                 WARN_ON(atomic_read(&io_request->refcount) == 0);
2528
2529                 switch (response->header.iu_type) {
2530                 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2531                 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2532                         if (io_request->scmd)
2533                                 io_request->scmd->result = 0;
2534                         /* fall through */
2535                 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2536                         break;
2537                 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2538                         io_request->status =
2539                                 pqi_interpret_task_management_response(
2540                                         (void *)response);
2541                         break;
2542                 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2543                         pqi_aio_path_disabled(io_request);
2544                         io_request->status = -EAGAIN;
2545                         break;
2546                 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2547                 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2548                         io_request->error_info = ctrl_info->error_buffer +
2549                                 (get_unaligned_le16(&response->error_index) *
2550                                 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2551                         pqi_process_io_error(response->header.iu_type,
2552                                 io_request);
2553                         break;
2554                 default:
2555                         dev_err(&ctrl_info->pci_dev->dev,
2556                                 "unexpected IU type: 0x%x\n",
2557                                 response->header.iu_type);
2558                         WARN_ON(response->header.iu_type);
2559                         break;
2560                 }
2561
2562                 io_request->io_complete_callback(io_request,
2563                         io_request->context);
2564
2565                 /*
2566                  * Note that the I/O request structure CANNOT BE TOUCHED after
2567                  * returning from the I/O completion callback!
2568                  */
2569
2570                 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2571         }
2572
2573         if (num_responses) {
2574                 queue_group->oq_ci_copy = oq_ci;
2575                 writel(oq_ci, queue_group->oq_ci);
2576         }
2577
2578         return num_responses;
2579 }
2580
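/*
 * Circular-queue accounting: one slot is always left unused so that
 * pi == ci unambiguously means "empty".  For example, with 16 elements,
 * pi = 3 and ci = 7, twelve elements are in use and three are free.
 */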
2581 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2582         unsigned int ci, unsigned int elements_in_queue)
2583 {
2584         unsigned int num_elements_used;
2585
2586         if (pi >= ci)
2587                 num_elements_used = pi - ci;
2588         else
2589                 num_elements_used = elements_in_queue - ci + pi;
2590
2591         return elements_in_queue - num_elements_used - 1;
2592 }
2593
2594 #define PQI_EVENT_ACK_TIMEOUT   30      /* seconds */
2595
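/*
 * Post an event-acknowledge IU on the RAID path of the default queue group,
 * spinning for up to PQI_EVENT_ACK_TIMEOUT seconds for a free inbound-queue
 * element before giving up.
 */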
2596 static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2597         struct pqi_event_acknowledge_request *iu, size_t iu_length)
2598 {
2599         pqi_index_t iq_pi;
2600         pqi_index_t iq_ci;
2601         unsigned long flags;
2602         void *next_element;
2603         unsigned long timeout;
2604         struct pqi_queue_group *queue_group;
2605
2606         queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2607         put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2608
2609         timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2610
2611         while (1) {
2612                 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2613
2614                 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2615                 iq_ci = *queue_group->iq_ci[RAID_PATH];
2616
2617                 if (pqi_num_elements_free(iq_pi, iq_ci,
2618                         ctrl_info->num_elements_per_iq))
2619                         break;
2620
2621                 spin_unlock_irqrestore(
2622                         &queue_group->submit_lock[RAID_PATH], flags);
2623
2624                 if (time_after(jiffies, timeout)) {
2625                         dev_err(&ctrl_info->pci_dev->dev,
2626                                 "sending event acknowledge timed out\n");
2627                         return;
2628                 }
2629         }
2630
2631         next_element = queue_group->iq_element_array[RAID_PATH] +
2632                 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2633
2634         memcpy(next_element, iu, iu_length);
2635
2636         iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2637
2638         queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2639
2640         /*
2641          * This write notifies the controller that an IU is available to be
2642          * processed.
2643          */
2644         writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2645
2646         spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2647 }
2648
2649 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2650         struct pqi_event *event)
2651 {
2652         struct pqi_event_acknowledge_request request;
2653
2654         memset(&request, 0, sizeof(request));
2655
2656         request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2657         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2658                 &request.header.iu_length);
2659         request.event_type = event->event_type;
2660         request.event_id = event->event_id;
2661         request.additional_event_id = event->additional_event_id;
2662
2663         pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2664 }
2665
2666 static void pqi_event_worker(struct work_struct *work)
2667 {
2668         unsigned int i;
2669         struct pqi_ctrl_info *ctrl_info;
2670         struct pqi_event *pending_event;
2671         bool got_non_heartbeat_event = false;
2672
2673         ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2674
2675         pending_event = ctrl_info->pending_events;
2676         for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2677                 if (pending_event->pending) {
2678                         pending_event->pending = false;
2679                         pqi_acknowledge_event(ctrl_info, pending_event);
2680                         if (i != PQI_EVENT_HEARTBEAT)
2681                                 got_non_heartbeat_event = true;
2682                 }
2683                 pending_event++;
2684         }
2685
2686         if (got_non_heartbeat_event)
2687                 pqi_schedule_rescan_worker(ctrl_info);
2688 }
2689
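/*
 * Mark the controller offline and flush every request still sitting on the
 * per-queue-group submit lists (both RAID and AIO paths), completing any
 * attached SCSI commands with DID_NO_CONNECT.
 */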
2690 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2691 {
2692         unsigned int i;
2693         unsigned int path;
2694         struct pqi_queue_group *queue_group;
2695         unsigned long flags;
2696         struct pqi_io_request *io_request;
2697         struct pqi_io_request *next;
2698         struct scsi_cmnd *scmd;
2699
2700         ctrl_info->controller_online = false;
2701         dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2702
2703         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2704                 queue_group = &ctrl_info->queue_groups[i];
2705
2706                 for (path = 0; path < 2; path++) {
2707                         spin_lock_irqsave(
2708                                 &queue_group->submit_lock[path], flags);
2709
2710                         list_for_each_entry_safe(io_request, next,
2711                                 &queue_group->request_list[path],
2712                                 request_list_entry) {
2713
2714                                 scmd = io_request->scmd;
2715                                 if (scmd) {
2716                                         set_host_byte(scmd, DID_NO_CONNECT);
2717                                         pqi_scsi_done(scmd);
2718                                 }
2719
2720                                 list_del(&io_request->request_list_entry);
2721                         }
2722
2723                         spin_unlock_irqrestore(
2724                                 &queue_group->submit_lock[path], flags);
2725                 }
2726         }
2727 }
2728
2729 #define PQI_HEARTBEAT_TIMER_INTERVAL    (5 * HZ)
2730 #define PQI_MAX_HEARTBEAT_REQUESTS      5
2731
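/*
 * Heartbeat check, run every PQI_HEARTBEAT_TIMER_INTERVAL: if no interrupts
 * arrived since the previous run, the saved heartbeat event is marked pending
 * so the event worker re-acknowledges it; after more than
 * PQI_MAX_HEARTBEAT_REQUESTS consecutive silent intervals the controller is
 * taken offline.
 */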
2732 static void pqi_heartbeat_timer_handler(unsigned long data)
2733 {
2734         int num_interrupts;
2735         struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2736
2737         num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2738
2739         if (num_interrupts == ctrl_info->previous_num_interrupts) {
2740                 ctrl_info->num_heartbeats_requested++;
2741                 if (ctrl_info->num_heartbeats_requested >
2742                         PQI_MAX_HEARTBEAT_REQUESTS) {
2743                         pqi_take_ctrl_offline(ctrl_info);
2744                         return;
2745                 }
2746                 ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2747                 schedule_work(&ctrl_info->event_work);
2748         } else {
2749                 ctrl_info->num_heartbeats_requested = 0;
2750         }
2751
2752         ctrl_info->previous_num_interrupts = num_interrupts;
2753         mod_timer(&ctrl_info->heartbeat_timer,
2754                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2755 }
2756
2757 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2758 {
2759         ctrl_info->previous_num_interrupts =
2760                 atomic_read(&ctrl_info->num_interrupts);
2761
2762         init_timer(&ctrl_info->heartbeat_timer);
2763         ctrl_info->heartbeat_timer.expires =
2764                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2765         ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2766         ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2767         add_timer(&ctrl_info->heartbeat_timer);
2768         ctrl_info->heartbeat_timer_started = true;
2769 }
2770
2771 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2772 {
2773         if (ctrl_info->heartbeat_timer_started)
2774                 del_timer_sync(&ctrl_info->heartbeat_timer);
2775 }
2776
2777 static int pqi_event_type_to_event_index(unsigned int event_type)
2778 {
2779         int index;
2780
2781         switch (event_type) {
2782         case PQI_EVENT_TYPE_HEARTBEAT:
2783                 index = PQI_EVENT_HEARTBEAT;
2784                 break;
2785         case PQI_EVENT_TYPE_HOTPLUG:
2786                 index = PQI_EVENT_HOTPLUG;
2787                 break;
2788         case PQI_EVENT_TYPE_HARDWARE:
2789                 index = PQI_EVENT_HARDWARE;
2790                 break;
2791         case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2792                 index = PQI_EVENT_PHYSICAL_DEVICE;
2793                 break;
2794         case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2795                 index = PQI_EVENT_LOGICAL_DEVICE;
2796                 break;
2797         case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2798                 index = PQI_EVENT_AIO_STATE_CHANGE;
2799                 break;
2800         case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2801                 index = PQI_EVENT_AIO_CONFIG_CHANGE;
2802                 break;
2803         default:
2804                 index = -1;
2805                 break;
2806         }
2807
2808         return index;
2809 }
2810
2811 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2812 {
2813         unsigned int num_events;
2814         pqi_index_t oq_pi;
2815         pqi_index_t oq_ci;
2816         struct pqi_event_queue *event_queue;
2817         struct pqi_event_response *response;
2818         struct pqi_event *pending_event;
2819         bool need_delayed_work;
2820         int event_index;
2821
2822         event_queue = &ctrl_info->event_queue;
2823         num_events = 0;
2824         need_delayed_work = false;
2825         oq_ci = event_queue->oq_ci_copy;
2826
2827         while (1) {
2828                 oq_pi = *event_queue->oq_pi;
2829                 if (oq_pi == oq_ci)
2830                         break;
2831
2832                 num_events++;
2833                 response = event_queue->oq_element_array +
2834                         (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2835
2836                 event_index =
2837                         pqi_event_type_to_event_index(response->event_type);
2838
2839                 if (event_index >= 0) {
2840                         if (response->request_acknowlege) {
2841                                 pending_event =
2842                                         &ctrl_info->pending_events[event_index];
2843                                 pending_event->event_type =
2844                                         response->event_type;
2845                                 pending_event->event_id = response->event_id;
2846                                 pending_event->additional_event_id =
2847                                         response->additional_event_id;
2848                                 if (event_index != PQI_EVENT_HEARTBEAT) {
2849                                         pending_event->pending = true;
2850                                         need_delayed_work = true;
2851                                 }
2852                         }
2853                 }
2854
2855                 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2856         }
2857
2858         if (num_events) {
2859                 event_queue->oq_ci_copy = oq_ci;
2860                 writel(oq_ci, event_queue->oq_ci);
2861
2862                 if (need_delayed_work)
2863                         schedule_work(&ctrl_info->event_work);
2864         }
2865
2866         return num_events;
2867 }
2868
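     /*
      * Per-vector MSI-X interrupt handler.  Each vector is registered with its
      * queue group as the interrupt cookie, so the handler first drains that
      * group's outbound (response) queue; the vector shared with the event
      * queue (vector 0) additionally drains pending events.  Before returning,
      * any requests left queued because the inbound queues were full are given
      * another chance to be submitted on both the RAID and AIO paths.
      */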
2869 static irqreturn_t pqi_irq_handler(int irq, void *data)
2870 {
2871         struct pqi_ctrl_info *ctrl_info;
2872         struct pqi_queue_group *queue_group;
2873         unsigned int num_responses_handled;
2874
2875         queue_group = data;
2876         ctrl_info = queue_group->ctrl_info;
2877
2878         if (!ctrl_info || !queue_group->oq_ci)
2879                 return IRQ_NONE;
2880
2881         num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2882
2883         if (irq == ctrl_info->event_irq)
2884                 num_responses_handled += pqi_process_event_intr(ctrl_info);
2885
2886         if (num_responses_handled)
2887                 atomic_inc(&ctrl_info->num_interrupts);
2888
2889         pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2890         pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2891
2892         return IRQ_HANDLED;
2893 }
2894
2895 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2896 {
2897         int i;
2898         int rc;
2899
2900         ctrl_info->event_irq = ctrl_info->msix_vectors[0];
2901
2902         for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2903                 rc = request_irq(ctrl_info->msix_vectors[i],
2904                         pqi_irq_handler, 0,
2905                         DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2906                 if (rc) {
2907                         dev_err(&ctrl_info->pci_dev->dev,
2908                                 "irq %u init failed with error %d\n",
2909                                 ctrl_info->msix_vectors[i], rc);
2910                         return rc;
2911                 }
2912                 ctrl_info->num_msix_vectors_initialized++;
2913         }
2914
2915         return 0;
2916 }
2917
2918 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2919 {
2920         int i;
2921
2922         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2923                 free_irq(ctrl_info->msix_vectors[i],
2924                         ctrl_info->intr_data[i]);
2925 }
2926
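     /*
      * Enable MSI-X: request one vector per queue group (but never fewer than
      * PQI_MIN_MSIX_VECTORS), then record the vector number and the queue
      * group that will serve as its interrupt cookie for each vector actually
      * granted.  pci_enable_msix_range() may return fewer vectors than
      * requested, so num_msix_vectors_enabled is taken from its return value.
      */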
2927 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2928 {
2929         unsigned int i;
2930         int max_vectors;
2931         int num_vectors_enabled;
2932         struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2933
2934         max_vectors = ctrl_info->num_queue_groups;
2935
2936         for (i = 0; i < max_vectors; i++)
2937                 msix_entries[i].entry = i;
2938
2939         num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2940                 msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2941
2942         if (num_vectors_enabled < 0) {
2943                 dev_err(&ctrl_info->pci_dev->dev,
2944                         "MSI-X init failed with error %d\n",
2945                         num_vectors_enabled);
2946                 return num_vectors_enabled;
2947         }
2948
2949         ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2950         for (i = 0; i < num_vectors_enabled; i++) {
2951                 ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2952                 ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2953         }
2954
2955         return 0;
2956 }
2957
2958 static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2959 {
2960         int i;
2961         int rc;
2962         int cpu;
2963
2964         cpu = cpumask_first(cpu_online_mask);
2965         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2966                 rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2967                         get_cpu_mask(cpu));
2968                 if (rc)
2969                         dev_err(&ctrl_info->pci_dev->dev,
2970                                 "error %d setting affinity hint for irq vector %u\n",
2971                                 rc, ctrl_info->msix_vectors[i]);
2972                 cpu = cpumask_next(cpu, cpu_online_mask);
2973         }
2974 }
2975
2976 static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2977 {
2978         int i;
2979
2980         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2981                 irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
2982 }
2983
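     /*
      * All operational queue memory comes from a single coherent DMA
      * allocation.  The layout is computed twice with identical arithmetic:
      * first with a NULL base pointer to determine the total size (two inbound
      * element arrays per queue group, one outbound element array per group,
      * the event queue's element array, and 3 * num_queue_groups + 1 index
      * words for the IQ CIs, OQ PIs and the event queue PI), then again
      * against the real allocation to hand out the aligned CPU pointers and
      * the matching bus addresses.
      */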
2984 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2985 {
2986         unsigned int i;
2987         size_t alloc_length;
2988         size_t element_array_length_per_iq;
2989         size_t element_array_length_per_oq;
2990         void *element_array;
2991         void *next_queue_index;
2992         void *aligned_pointer;
2993         unsigned int num_inbound_queues;
2994         unsigned int num_outbound_queues;
2995         unsigned int num_queue_indexes;
2996         struct pqi_queue_group *queue_group;
2997
2998         element_array_length_per_iq =
2999                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3000                 ctrl_info->num_elements_per_iq;
3001         element_array_length_per_oq =
3002                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3003                 ctrl_info->num_elements_per_oq;
3004         num_inbound_queues = ctrl_info->num_queue_groups * 2;
3005         num_outbound_queues = ctrl_info->num_queue_groups;
3006         num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3007
3008         aligned_pointer = NULL;
3009
3010         for (i = 0; i < num_inbound_queues; i++) {
3011                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3012                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3013                 aligned_pointer += element_array_length_per_iq;
3014         }
3015
3016         for (i = 0; i < num_outbound_queues; i++) {
3017                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3018                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3019                 aligned_pointer += element_array_length_per_oq;
3020         }
3021
3022         aligned_pointer = PTR_ALIGN(aligned_pointer,
3023                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3024         aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3025                 PQI_EVENT_OQ_ELEMENT_LENGTH;
3026
3027         for (i = 0; i < num_queue_indexes; i++) {
3028                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3029                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3030                 aligned_pointer += sizeof(pqi_index_t);
3031         }
3032
3033         alloc_length = (size_t)aligned_pointer +
3034                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3035
3036         ctrl_info->queue_memory_base =
3037                 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3038                         alloc_length,
3039                         &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3040
3041         if (!ctrl_info->queue_memory_base) {
3042                 dev_err(&ctrl_info->pci_dev->dev,
3043                         "failed to allocate memory for PQI operational queues\n");
3044                 return -ENOMEM;
3045         }
3046
3047         ctrl_info->queue_memory_length = alloc_length;
3048
3049         element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3050                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3051
3052         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3053                 queue_group = &ctrl_info->queue_groups[i];
3054                 queue_group->iq_element_array[RAID_PATH] = element_array;
3055                 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3056                         ctrl_info->queue_memory_base_dma_handle +
3057                                 (element_array - ctrl_info->queue_memory_base);
3058                 element_array += element_array_length_per_iq;
3059                 element_array = PTR_ALIGN(element_array,
3060                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3061                 queue_group->iq_element_array[AIO_PATH] = element_array;
3062                 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3063                         ctrl_info->queue_memory_base_dma_handle +
3064                         (element_array - ctrl_info->queue_memory_base);
3065                 element_array += element_array_length_per_iq;
3066                 element_array = PTR_ALIGN(element_array,
3067                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3068         }
3069
3070         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3071                 queue_group = &ctrl_info->queue_groups[i];
3072                 queue_group->oq_element_array = element_array;
3073                 queue_group->oq_element_array_bus_addr =
3074                         ctrl_info->queue_memory_base_dma_handle +
3075                         (element_array - ctrl_info->queue_memory_base);
3076                 element_array += element_array_length_per_oq;
3077                 element_array = PTR_ALIGN(element_array,
3078                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3079         }
3080
3081         ctrl_info->event_queue.oq_element_array = element_array;
3082         ctrl_info->event_queue.oq_element_array_bus_addr =
3083                 ctrl_info->queue_memory_base_dma_handle +
3084                 (element_array - ctrl_info->queue_memory_base);
3085         element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3086                 PQI_EVENT_OQ_ELEMENT_LENGTH;
3087
3088         next_queue_index = PTR_ALIGN(element_array,
3089                 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3090
3091         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3092                 queue_group = &ctrl_info->queue_groups[i];
3093                 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3094                 queue_group->iq_ci_bus_addr[RAID_PATH] =
3095                         ctrl_info->queue_memory_base_dma_handle +
3096                         (next_queue_index - ctrl_info->queue_memory_base);
3097                 next_queue_index += sizeof(pqi_index_t);
3098                 next_queue_index = PTR_ALIGN(next_queue_index,
3099                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3100                 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3101                 queue_group->iq_ci_bus_addr[AIO_PATH] =
3102                         ctrl_info->queue_memory_base_dma_handle +
3103                         (next_queue_index - ctrl_info->queue_memory_base);
3104                 next_queue_index += sizeof(pqi_index_t);
3105                 next_queue_index = PTR_ALIGN(next_queue_index,
3106                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3107                 queue_group->oq_pi = next_queue_index;
3108                 queue_group->oq_pi_bus_addr =
3109                         ctrl_info->queue_memory_base_dma_handle +
3110                         (next_queue_index - ctrl_info->queue_memory_base);
3111                 next_queue_index += sizeof(pqi_index_t);
3112                 next_queue_index = PTR_ALIGN(next_queue_index,
3113                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3114         }
3115
3116         ctrl_info->event_queue.oq_pi = next_queue_index;
3117         ctrl_info->event_queue.oq_pi_bus_addr =
3118                 ctrl_info->queue_memory_base_dma_handle +
3119                 (next_queue_index - ctrl_info->queue_memory_base);
3120
3121         return 0;
3122 }
3123
3124 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3125 {
3126         unsigned int i;
3127         u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3128         u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3129
3130         /*
3131          * Initialize the backpointers to the controller structure in
3132          * each operational queue group structure.
3133          */
3134         for (i = 0; i < ctrl_info->num_queue_groups; i++)
3135                 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3136
3137         /*
3138          * Assign IDs to all operational queues.  Note that the IDs
3139          * assigned to operational IQs are independent of the IDs
3140          * assigned to operational OQs.
3141          */
3142         ctrl_info->event_queue.oq_id = next_oq_id++;
3143         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3144                 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3145                 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3146                 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3147         }
3148
3149         /*
3150          * Assign MSI-X table entry indexes to all queues.  Note that the
3151          * interrupt for the event queue is shared with the first queue group.
3152          */
3153         ctrl_info->event_queue.int_msg_num = 0;
3154         for (i = 0; i < ctrl_info->num_queue_groups; i++)
3155                 ctrl_info->queue_groups[i].int_msg_num = i;
3156
3157         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3158                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3159                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3160                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3161                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3162         }
3163 }
3164
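     /*
      * The admin queue pair (one inbound and one outbound queue plus their
      * CI/PI words) lives in a single pqi_admin_queues_aligned structure
      * allocated from coherent DMA memory and padded so it can be aligned to
      * the element array alignment.  Bus addresses are derived by applying the
      * same offsets to the DMA handle that the CPU pointers have from the
      * allocation base.
      */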
3165 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3166 {
3167         size_t alloc_length;
3168         struct pqi_admin_queues_aligned *admin_queues_aligned;
3169         struct pqi_admin_queues *admin_queues;
3170
3171         alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3172                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3173
3174         ctrl_info->admin_queue_memory_base =
3175                 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3176                         alloc_length,
3177                         &ctrl_info->admin_queue_memory_base_dma_handle,
3178                         GFP_KERNEL);
3179
3180         if (!ctrl_info->admin_queue_memory_base)
3181                 return -ENOMEM;
3182
3183         ctrl_info->admin_queue_memory_length = alloc_length;
3184
3185         admin_queues = &ctrl_info->admin_queues;
3186         admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3187                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3188         admin_queues->iq_element_array =
3189                 &admin_queues_aligned->iq_element_array;
3190         admin_queues->oq_element_array =
3191                 &admin_queues_aligned->oq_element_array;
3192         admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3193         admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3194
3195         admin_queues->iq_element_array_bus_addr =
3196                 ctrl_info->admin_queue_memory_base_dma_handle +
3197                 (admin_queues->iq_element_array -
3198                 ctrl_info->admin_queue_memory_base);
3199         admin_queues->oq_element_array_bus_addr =
3200                 ctrl_info->admin_queue_memory_base_dma_handle +
3201                 (admin_queues->oq_element_array -
3202                 ctrl_info->admin_queue_memory_base);
3203         admin_queues->iq_ci_bus_addr =
3204                 ctrl_info->admin_queue_memory_base_dma_handle +
3205                 ((void *)admin_queues->iq_ci -
3206                 ctrl_info->admin_queue_memory_base);
3207         admin_queues->oq_pi_bus_addr =
3208                 ctrl_info->admin_queue_memory_base_dma_handle +
3209                 ((void *)admin_queues->oq_pi -
3210                 ctrl_info->admin_queue_memory_base);
3211
3212         return 0;
3213 }
3214
3215 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES          HZ
3216 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS      1
3217
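     /*
      * Create the admin queue pair.  The element array and CI/PI buffer
      * addresses are programmed into the PQI device registers along with the
      * queue sizes and MSI-X message number, the "create admin queue pair"
      * function code is written, and function_and_status_code is then polled
      * in 1 ms steps, for up to about a second, until the device reports
      * PQI_STATUS_IDLE.
      */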
3218 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3219 {
3220         struct pqi_device_registers __iomem *pqi_registers;
3221         struct pqi_admin_queues *admin_queues;
3222         unsigned long timeout;
3223         u8 status;
3224         u32 reg;
3225
3226         pqi_registers = ctrl_info->pqi_registers;
3227         admin_queues = &ctrl_info->admin_queues;
3228
3229         writeq((u64)admin_queues->iq_element_array_bus_addr,
3230                 &pqi_registers->admin_iq_element_array_addr);
3231         writeq((u64)admin_queues->oq_element_array_bus_addr,
3232                 &pqi_registers->admin_oq_element_array_addr);
3233         writeq((u64)admin_queues->iq_ci_bus_addr,
3234                 &pqi_registers->admin_iq_ci_addr);
3235         writeq((u64)admin_queues->oq_pi_bus_addr,
3236                 &pqi_registers->admin_oq_pi_addr);
3237
3238         reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3239                 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3240                 (admin_queues->int_msg_num << 16);
3241         writel(reg, &pqi_registers->admin_iq_num_elements);
3242         writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3243                 &pqi_registers->function_and_status_code);
3244
3245         timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3246         while (1) {
3247                 status = readb(&pqi_registers->function_and_status_code);
3248                 if (status == PQI_STATUS_IDLE)
3249                         break;
3250                 if (time_after(jiffies, timeout))
3251                         return -ETIMEDOUT;
3252                 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3253         }
3254
3255         /*
3256          * The offset registers are not initialized to the correct
3257          * offsets until *after* the create admin queue pair command
3258          * completes successfully.
3259          */
3260         admin_queues->iq_pi = ctrl_info->iomem_base +
3261                 PQI_DEVICE_REGISTERS_OFFSET +
3262                 readq(&pqi_registers->admin_iq_pi_offset);
3263         admin_queues->oq_ci = ctrl_info->iomem_base +
3264                 PQI_DEVICE_REGISTERS_OFFSET +
3265                 readq(&pqi_registers->admin_oq_ci_offset);
3266
3267         return 0;
3268 }
3269
3270 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3271         struct pqi_general_admin_request *request)
3272 {
3273         struct pqi_admin_queues *admin_queues;
3274         void *next_element;
3275         pqi_index_t iq_pi;
3276
3277         admin_queues = &ctrl_info->admin_queues;
3278         iq_pi = admin_queues->iq_pi_copy;
3279
3280         next_element = admin_queues->iq_element_array +
3281                 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3282
3283         memcpy(next_element, request, sizeof(*request));
3284
3285         iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3286         admin_queues->iq_pi_copy = iq_pi;
3287
3288         /*
3289          * This write notifies the controller that an IU is available to be
3290          * processed.
3291          */
3292         writel(iq_pi, admin_queues->iq_pi);
3293 }
3294
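     /*
      * Wait for the controller to post an admin response by polling the admin
      * outbound queue's producer index (updated by the controller in host
      * memory) until it moves past the cached consumer index, giving up after
      * roughly three seconds.  The response element is copied out, and the
      * advanced consumer index is written back to the controller's OQ CI
      * register.
      */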
3295 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3296         struct pqi_general_admin_response *response)
3297 {
3298         struct pqi_admin_queues *admin_queues;
3299         pqi_index_t oq_pi;
3300         pqi_index_t oq_ci;
3301         unsigned long timeout;
3302
3303         admin_queues = &ctrl_info->admin_queues;
3304         oq_ci = admin_queues->oq_ci_copy;
3305
3306         timeout = (3 * HZ) + jiffies;
3307
3308         while (1) {
3309                 oq_pi = *admin_queues->oq_pi;
3310                 if (oq_pi != oq_ci)
3311                         break;
3312                 if (time_after(jiffies, timeout)) {
3313                         dev_err(&ctrl_info->pci_dev->dev,
3314                                 "timed out waiting for admin response\n");
3315                         return -ETIMEDOUT;
3316                 }
3317                 usleep_range(1000, 2000);
3318         }
3319
3320         memcpy(response, admin_queues->oq_element_array +
3321                 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3322
3323         oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3324         admin_queues->oq_ci_copy = oq_ci;
3325         writel(oq_ci, admin_queues->oq_ci);
3326
3327         return 0;
3328 }
3329
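     /*
      * Submit queued I/O to one inbound queue.  With the per-path submit lock
      * held, the new request (if any) is appended to the path's request list
      * and the list is then drained into the inbound element array: each IU
      * may span several elements and may wrap around the end of the ring, in
      * which case it is copied in two pieces.  A request that does not fit in
      * the free space between the cached PI and the controller-updated CI
      * stays on the list and is retried from the interrupt handler once
      * completions free up elements.  A single write of the new PI to the
      * doorbell covers everything copied.
      */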
3330 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3331         struct pqi_queue_group *queue_group, enum pqi_io_path path,
3332         struct pqi_io_request *io_request)
3333 {
3334         struct pqi_io_request *next;
3335         void *next_element;
3336         pqi_index_t iq_pi;
3337         pqi_index_t iq_ci;
3338         size_t iu_length;
3339         unsigned long flags;
3340         unsigned int num_elements_needed;
3341         unsigned int num_elements_to_end_of_queue;
3342         size_t copy_count;
3343         struct pqi_iu_header *request;
3344
3345         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3346
3347         if (io_request)
3348                 list_add_tail(&io_request->request_list_entry,
3349                         &queue_group->request_list[path]);
3350
3351         iq_pi = queue_group->iq_pi_copy[path];
3352
3353         list_for_each_entry_safe(io_request, next,
3354                 &queue_group->request_list[path], request_list_entry) {
3355
3356                 request = io_request->iu;
3357
3358                 iu_length = get_unaligned_le16(&request->iu_length) +
3359                         PQI_REQUEST_HEADER_LENGTH;
3360                 num_elements_needed =
3361                         DIV_ROUND_UP(iu_length,
3362                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3363
3364                 iq_ci = *queue_group->iq_ci[path];
3365
3366                 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3367                         ctrl_info->num_elements_per_iq))
3368                         break;
3369
3370                 put_unaligned_le16(queue_group->oq_id,
3371                         &request->response_queue_id);
3372
3373                 next_element = queue_group->iq_element_array[path] +
3374                         (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3375
3376                 num_elements_to_end_of_queue =
3377                         ctrl_info->num_elements_per_iq - iq_pi;
3378
3379                 if (num_elements_needed <= num_elements_to_end_of_queue) {
3380                         memcpy(next_element, request, iu_length);
3381                 } else {
3382                         copy_count = num_elements_to_end_of_queue *
3383                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3384                         memcpy(next_element, request, copy_count);
3385                         memcpy(queue_group->iq_element_array[path],
3386                                 (u8 *)request + copy_count,
3387                                 iu_length - copy_count);
3388                 }
3389
3390                 iq_pi = (iq_pi + num_elements_needed) %
3391                         ctrl_info->num_elements_per_iq;
3392
3393                 list_del(&io_request->request_list_entry);
3394         }
3395
3396         if (iq_pi != queue_group->iq_pi_copy[path]) {
3397                 queue_group->iq_pi_copy[path] = iq_pi;
3398                 /*
3399                  * This write notifies the controller that one or more IUs are
3400                  * available to be processed.
3401                  */
3402                 writel(iq_pi, queue_group->iq_pi[path]);
3403         }
3404
3405         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3406 }
3407
3408 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3409         void *context)
3410 {
3411         struct completion *waiting = context;
3412
3413         complete(waiting);
3414 }
3415
3416 static int pqi_submit_raid_request_synchronous_with_io_request(
3417         struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3418         unsigned long timeout_msecs)
3419 {
3420         int rc = 0;
3421         DECLARE_COMPLETION_ONSTACK(wait);
3422
3423         io_request->io_complete_callback = pqi_raid_synchronous_complete;
3424         io_request->context = &wait;
3425
3426         pqi_start_io(ctrl_info,
3427                 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3428                 io_request);
3429
3430         if (timeout_msecs == NO_TIMEOUT) {
3431                 wait_for_completion_io(&wait);
3432         } else {
3433                 if (!wait_for_completion_io_timeout(&wait,
3434                         msecs_to_jiffies(timeout_msecs))) {
3435                         dev_warn(&ctrl_info->pci_dev->dev,
3436                                 "command timed out\n");
3437                         rc = -ETIMEDOUT;
3438                 }
3439         }
3440
3441         return rc;
3442 }
3443
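     /*
      * Synchronous RAID-path request.  Callers are serialized on
      * sync_request_sem; when a timeout is supplied, the time spent waiting
      * for the semaphore is subtracted from it before the request is issued.
      * The caller's IU is copied into a pooled io_request (with the pool slot
      * index stamped into the request ID and, for RAID path I/O, the error
      * index), submitted on the default queue group's RAID path, and waited
      * for via a completion.  Firmware error information is either copied
      * back to the caller or, if the caller did not ask for it, collapsed
      * into an -EIO return for anything other than a good SCSI status
      * (a check condition reported with a data underflow counts as success).
      */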
3444 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3445         struct pqi_iu_header *request, unsigned int flags,
3446         struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3447 {
3448         int rc;
3449         struct pqi_io_request *io_request;
3450         unsigned long start_jiffies;
3451         unsigned long msecs_blocked;
3452         size_t iu_length;
3453
3454         /*
3455          * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE is mutually
3456          * exclusive with specifying a timeout value.
3457          */
3458
3459         if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3460                 if (down_interruptible(&ctrl_info->sync_request_sem))
3461                         return -ERESTARTSYS;
3462         } else {
3463                 if (timeout_msecs == NO_TIMEOUT) {
3464                         down(&ctrl_info->sync_request_sem);
3465                 } else {
3466                         start_jiffies = jiffies;
3467                         if (down_timeout(&ctrl_info->sync_request_sem,
3468                                 msecs_to_jiffies(timeout_msecs)))
3469                                 return -ETIMEDOUT;
3470                         msecs_blocked =
3471                                 jiffies_to_msecs(jiffies - start_jiffies);
3472                         if (msecs_blocked >= timeout_msecs)
3473                                 return -ETIMEDOUT;
3474                         timeout_msecs -= msecs_blocked;
3475                 }
3476         }
3477
3478         io_request = pqi_alloc_io_request(ctrl_info);
3479
3480         put_unaligned_le16(io_request->index,
3481                 &(((struct pqi_raid_path_request *)request)->request_id));
3482
3483         if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3484                 ((struct pqi_raid_path_request *)request)->error_index =
3485                         ((struct pqi_raid_path_request *)request)->request_id;
3486
3487         iu_length = get_unaligned_le16(&request->iu_length) +
3488                 PQI_REQUEST_HEADER_LENGTH;
3489         memcpy(io_request->iu, request, iu_length);
3490
3491         rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3492                 io_request, timeout_msecs);
3493
3494         if (error_info) {
3495                 if (io_request->error_info)
3496                         memcpy(error_info, io_request->error_info,
3497                                 sizeof(*error_info));
3498                 else
3499                         memset(error_info, 0, sizeof(*error_info));
3500         } else if (rc == 0 && io_request->error_info) {
3501                 u8 scsi_status;
3502                 struct pqi_raid_error_info *raid_error_info;
3503
3504                 raid_error_info = io_request->error_info;
3505                 scsi_status = raid_error_info->status;
3506
3507                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3508                         raid_error_info->data_out_result ==
3509                         PQI_DATA_IN_OUT_UNDERFLOW)
3510                         scsi_status = SAM_STAT_GOOD;
3511
3512                 if (scsi_status != SAM_STAT_GOOD)
3513                         rc = -EIO;
3514         }
3515
3516         pqi_free_io_request(io_request);
3517
3518         up(&ctrl_info->sync_request_sem);
3519
3520         return rc;
3521 }
3522
3523 static int pqi_validate_admin_response(
3524         struct pqi_general_admin_response *response, u8 expected_function_code)
3525 {
3526         if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3527                 return -EINVAL;
3528
3529         if (get_unaligned_le16(&response->header.iu_length) !=
3530                 PQI_GENERAL_ADMIN_IU_LENGTH)
3531                 return -EINVAL;
3532
3533         if (response->function_code != expected_function_code)
3534                 return -EINVAL;
3535
3536         if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3537                 return -EINVAL;
3538
3539         return 0;
3540 }
3541
3542 static int pqi_submit_admin_request_synchronous(
3543         struct pqi_ctrl_info *ctrl_info,
3544         struct pqi_general_admin_request *request,
3545         struct pqi_general_admin_response *response)
3546 {
3547         int rc;
3548
3549         pqi_submit_admin_request(ctrl_info, request);
3550
3551         rc = pqi_poll_for_admin_response(ctrl_info, response);
3552
3553         if (rc == 0)
3554                 rc = pqi_validate_admin_response(response,
3555                         request->function_code);
3556
3557         return rc;
3558 }
3559
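     /*
      * Fetch the controller's REPORT DEVICE CAPABILITY data via a general
      * admin command with a DMA-mapped response buffer.  The queue and element
      * limits it reports are cached in ctrl_info; the element length fields
      * are encoded in units of 16 bytes, hence the multiplications by 16.  The
      * SOP IU layer descriptor supplies the maximum inbound IU length and the
      * inbound/outbound spanning capabilities that
      * pqi_validate_device_capability() and the queue sizing code rely on.
      */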
3560 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3561 {
3562         int rc;
3563         struct pqi_general_admin_request request;
3564         struct pqi_general_admin_response response;
3565         struct pqi_device_capability *capability;
3566         struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3567
3568         capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3569         if (!capability)
3570                 return -ENOMEM;
3571
3572         memset(&request, 0, sizeof(request));
3573
3574         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3575         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3576                 &request.header.iu_length);
3577         request.function_code =
3578                 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3579         put_unaligned_le32(sizeof(*capability),
3580                 &request.data.report_device_capability.buffer_length);
3581
3582         rc = pqi_map_single(ctrl_info->pci_dev,
3583                 &request.data.report_device_capability.sg_descriptor,
3584                 capability, sizeof(*capability),
3585                 PCI_DMA_FROMDEVICE);
3586         if (rc)
3587                 goto out;
3588
3589         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3590                 &response);
3591
3592         pqi_pci_unmap(ctrl_info->pci_dev,
3593                 &request.data.report_device_capability.sg_descriptor, 1,
3594                 PCI_DMA_FROMDEVICE);
3595
3596         if (rc)
3597                 goto out;
3598
3599         if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3600                 rc = -EIO;
3601                 goto out;
3602         }
3603
3604         ctrl_info->max_inbound_queues =
3605                 get_unaligned_le16(&capability->max_inbound_queues);
3606         ctrl_info->max_elements_per_iq =
3607                 get_unaligned_le16(&capability->max_elements_per_iq);
3608         ctrl_info->max_iq_element_length =
3609                 get_unaligned_le16(&capability->max_iq_element_length)
3610                 * 16;
3611         ctrl_info->max_outbound_queues =
3612                 get_unaligned_le16(&capability->max_outbound_queues);
3613         ctrl_info->max_elements_per_oq =
3614                 get_unaligned_le16(&capability->max_elements_per_oq);
3615         ctrl_info->max_oq_element_length =
3616                 get_unaligned_le16(&capability->max_oq_element_length)
3617                 * 16;
3618
3619         sop_iu_layer_descriptor =
3620                 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3621
3622         ctrl_info->max_inbound_iu_length_per_firmware =
3623                 get_unaligned_le16(
3624                         &sop_iu_layer_descriptor->max_inbound_iu_length);
3625         ctrl_info->inbound_spanning_supported =
3626                 sop_iu_layer_descriptor->inbound_spanning_supported;
3627         ctrl_info->outbound_spanning_supported =
3628                 sop_iu_layer_descriptor->outbound_spanning_supported;
3629
3630 out:
3631         kfree(capability);
3632
3633         return rc;
3634 }
3635
3636 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3637 {
3638         if (ctrl_info->max_iq_element_length <
3639                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3640                 dev_err(&ctrl_info->pci_dev->dev,
3641                         "max. inbound queue element length of %d is less than the required length of %d\n",
3642                         ctrl_info->max_iq_element_length,
3643                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3644                 return -EINVAL;
3645         }
3646
3647         if (ctrl_info->max_oq_element_length <
3648                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3649                 dev_err(&ctrl_info->pci_dev->dev,
3650                         "max. outbound queue element length of %d is less than the required length of %d\n",
3651                         ctrl_info->max_oq_element_length,
3652                         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3653                 return -EINVAL;
3654         }
3655
3656         if (ctrl_info->max_inbound_iu_length_per_firmware <
3657                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3658                 dev_err(&ctrl_info->pci_dev->dev,
3659                         "max. inbound IU length of %u is less than the min. required length of %d\n",
3660                         ctrl_info->max_inbound_iu_length_per_firmware,
3661                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3662                 return -EINVAL;
3663         }
3664
3665         if (!ctrl_info->inbound_spanning_supported) {
3666                 dev_err(&ctrl_info->pci_dev->dev,
3667                         "the controller does not support inbound spanning\n");
3668                 return -EINVAL;
3669         }
3670
3671         if (ctrl_info->outbound_spanning_supported) {
3672                 dev_err(&ctrl_info->pci_dev->dev,
3673                         "the controller supports outbound spanning but this driver does not\n");
3674                 return -EINVAL;
3675         }
3676
3677         return 0;
3678 }
3679
3680 static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3681         bool inbound_queue, u16 queue_id)
3682 {
3683         struct pqi_general_admin_request request;
3684         struct pqi_general_admin_response response;
3685
3686         memset(&request, 0, sizeof(request));
3687         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3688         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3689                 &request.header.iu_length);
3690         if (inbound_queue)
3691                 request.function_code =
3692                         PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3693         else
3694                 request.function_code =
3695                         PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3696         put_unaligned_le16(queue_id,
3697                 &request.data.delete_operational_queue.queue_id);
3698
3699         return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3700                 &response);
3701 }
3702
3703 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3704 {
3705         int rc;
3706         struct pqi_event_queue *event_queue;
3707         struct pqi_general_admin_request request;
3708         struct pqi_general_admin_response response;
3709
3710         event_queue = &ctrl_info->event_queue;
3711
3712         /*
3713          * Create an OQ (Outbound Queue - device to host queue) dedicated
3714          * to event notifications.
3715          */
3716         memset(&request, 0, sizeof(request));
3717         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3718         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3719                 &request.header.iu_length);
3720         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3721         put_unaligned_le16(event_queue->oq_id,
3722                 &request.data.create_operational_oq.queue_id);
3723         put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3724                 &request.data.create_operational_oq.element_array_addr);
3725         put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3726                 &request.data.create_operational_oq.pi_addr);
3727         put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3728                 &request.data.create_operational_oq.num_elements);
3729         put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3730                 &request.data.create_operational_oq.element_length);
3731         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3732         put_unaligned_le16(event_queue->int_msg_num,
3733                 &request.data.create_operational_oq.int_msg_num);
3734
3735         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3736                 &response);
3737         if (rc)
3738                 return rc;
3739
3740         event_queue->oq_ci = ctrl_info->iomem_base +
3741                 PQI_DEVICE_REGISTERS_OFFSET +
3742                 get_unaligned_le64(
3743                         &response.data.create_operational_oq.oq_ci_offset);
3744
3745         return 0;
3746 }
3747
3748 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3749 {
3750         unsigned int i;
3751         int rc;
3752         struct pqi_queue_group *queue_group;
3753         struct pqi_general_admin_request request;
3754         struct pqi_general_admin_response response;
3755
3756         i = ctrl_info->num_active_queue_groups;
3757         queue_group = &ctrl_info->queue_groups[i];
3758
3759         /*
3760          * Create IQ (Inbound Queue - host to device queue) for
3761          * RAID path.
3762          */
3763         memset(&request, 0, sizeof(request));
3764         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3765         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3766                 &request.header.iu_length);
3767         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3768         put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3769                 &request.data.create_operational_iq.queue_id);
3770         put_unaligned_le64(
3771                 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3772                 &request.data.create_operational_iq.element_array_addr);
3773         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3774                 &request.data.create_operational_iq.ci_addr);
3775         put_unaligned_le16(ctrl_info->num_elements_per_iq,
3776                 &request.data.create_operational_iq.num_elements);
3777         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3778                 &request.data.create_operational_iq.element_length);
3779         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3780
3781         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3782                 &response);
3783         if (rc) {
3784                 dev_err(&ctrl_info->pci_dev->dev,
3785                         "error creating inbound RAID queue\n");
3786                 return rc;
3787         }
3788
3789         queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3790                 PQI_DEVICE_REGISTERS_OFFSET +
3791                 get_unaligned_le64(
3792                         &response.data.create_operational_iq.iq_pi_offset);
3793
3794         /*
3795          * Create IQ (Inbound Queue - host to device queue) for
3796          * Advanced I/O (AIO) path.
3797          */
3798         memset(&request, 0, sizeof(request));
3799         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3800         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3801                 &request.header.iu_length);
3802         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3803         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3804                 &request.data.create_operational_iq.queue_id);
3805         put_unaligned_le64(
3806                 (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
3807                 &request.data.create_operational_iq.element_array_addr);
3808         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3809                 &request.data.create_operational_iq.ci_addr);
3810         put_unaligned_le16(ctrl_info->num_elements_per_iq,
3811                 &request.data.create_operational_iq.num_elements);
3812         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3813                 &request.data.create_operational_iq.element_length);
3814         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3815
3816         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3817                 &response);
3818         if (rc) {
3819                 dev_err(&ctrl_info->pci_dev->dev,
3820                         "error creating inbound AIO queue\n");
3821                 goto delete_inbound_queue_raid;
3822         }
3823
3824         queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3825                 PQI_DEVICE_REGISTERS_OFFSET +
3826                 get_unaligned_le64(
3827                         &response.data.create_operational_iq.iq_pi_offset);
3828
3829         /*
3830          * Designate the 2nd IQ as the AIO path.  By default, all IQs are
3831          * assumed to be for RAID path I/O unless we change the queue's
3832          * property.
3833          */
3834         memset(&request, 0, sizeof(request));
3835         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3836         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3837                 &request.header.iu_length);
3838         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3839         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3840                 &request.data.change_operational_iq_properties.queue_id);
3841         put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3842                 &request.data.change_operational_iq_properties.vendor_specific);
3843
3844         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3845                 &response);
3846         if (rc) {
3847                 dev_err(&ctrl_info->pci_dev->dev,
3848                         "error changing queue property\n");
3849                 goto delete_inbound_queue_aio;
3850         }
3851
3852         /*
3853          * Create OQ (Outbound Queue - device to host queue).
3854          */
3855         memset(&request, 0, sizeof(request));
3856         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3857         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3858                 &request.header.iu_length);
3859         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3860         put_unaligned_le16(queue_group->oq_id,
3861                 &request.data.create_operational_oq.queue_id);
3862         put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3863                 &request.data.create_operational_oq.element_array_addr);
3864         put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3865                 &request.data.create_operational_oq.pi_addr);
3866         put_unaligned_le16(ctrl_info->num_elements_per_oq,
3867                 &request.data.create_operational_oq.num_elements);
3868         put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3869                 &request.data.create_operational_oq.element_length);
3870         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3871         put_unaligned_le16(queue_group->int_msg_num,
3872                 &request.data.create_operational_oq.int_msg_num);
3873
3874         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3875                 &response);
3876         if (rc) {
3877                 dev_err(&ctrl_info->pci_dev->dev,
3878                         "error creating outbound queue\n");
3879                 goto delete_inbound_queue_aio;
3880         }
3881
3882         queue_group->oq_ci = ctrl_info->iomem_base +
3883                 PQI_DEVICE_REGISTERS_OFFSET +
3884                 get_unaligned_le64(
3885                         &response.data.create_operational_oq.oq_ci_offset);
3886
3887         ctrl_info->num_active_queue_groups++;
3888
3889         return 0;
3890
3891 delete_inbound_queue_aio:
3892         pqi_delete_operational_queue(ctrl_info, true,
3893                 queue_group->iq_id[AIO_PATH]);
3894
3895 delete_inbound_queue_raid:
3896         pqi_delete_operational_queue(ctrl_info, true,
3897                 queue_group->iq_id[RAID_PATH]);
3898
3899         return rc;
3900 }
3901
3902 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3903 {
3904         int rc;
3905         unsigned int i;
3906
3907         rc = pqi_create_event_queue(ctrl_info);
3908         if (rc) {
3909                 dev_err(&ctrl_info->pci_dev->dev,
3910                         "error creating event queue\n");
3911                 return rc;
3912         }
3913
3914         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3915                 rc = pqi_create_queue_group(ctrl_info);
3916                 if (rc) {
3917                         dev_err(&ctrl_info->pci_dev->dev,
3918                                 "error creating queue group number %u/%u\n",
3919                                 i, ctrl_info->num_queue_groups);
3920                         return rc;
3921                 }
3922         }
3923
3924         return 0;
3925 }
3926
3927 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
3928         (offsetof(struct pqi_event_config, descriptors) + \
3929         (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3930
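     /*
      * Event configuration is a read-modify-write of the controller's event
      * descriptor table: REPORT VENDOR EVENT CONFIG reads the table into a
      * DMA-mapped buffer, every descriptor's oq_id is redirected to the
      * dedicated event queue, and SET VENDOR EVENT CONFIG writes the table
      * back, so that all event notifications are delivered on the event OQ
      * created earlier.
      */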
3931 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3932 {
3933         int rc;
3934         unsigned int i;
3935         struct pqi_event_config *event_config;
3936         struct pqi_general_management_request request;
3937
3938         event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3939                 GFP_KERNEL);
3940         if (!event_config)
3941                 return -ENOMEM;
3942
3943         memset(&request, 0, sizeof(request));
3944
3945         request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3946         put_unaligned_le16(offsetof(struct pqi_general_management_request,
3947                 data.report_event_configuration.sg_descriptors[1]) -
3948                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3949         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3950                 &request.data.report_event_configuration.buffer_length);
3951
3952         rc = pqi_map_single(ctrl_info->pci_dev,
3953                 request.data.report_event_configuration.sg_descriptors,
3954                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3955                 PCI_DMA_FROMDEVICE);
3956         if (rc)
3957                 goto out;
3958
3959         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3960                 0, NULL, NO_TIMEOUT);
3961
3962         pqi_pci_unmap(ctrl_info->pci_dev,
3963                 request.data.report_event_configuration.sg_descriptors, 1,
3964                 PCI_DMA_FROMDEVICE);
3965
3966         if (rc)
3967                 goto out;
3968
3969         for (i = 0; i < event_config->num_event_descriptors; i++)
3970                 put_unaligned_le16(ctrl_info->event_queue.oq_id,
3971                         &event_config->descriptors[i].oq_id);
3972
3973         memset(&request, 0, sizeof(request));
3974
3975         request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3976         put_unaligned_le16(offsetof(struct pqi_general_management_request,
3977                 data.report_event_configuration.sg_descriptors[1]) -
3978                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3979         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3980                 &request.data.report_event_configuration.buffer_length);
3981
3982         rc = pqi_map_single(ctrl_info->pci_dev,
3983                 request.data.report_event_configuration.sg_descriptors,
3984                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3985                 PCI_DMA_TODEVICE);
3986         if (rc)
3987                 goto out;
3988
3989         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
3990                 NULL, NO_TIMEOUT);
3991
3992         pqi_pci_unmap(ctrl_info->pci_dev,
3993                 request.data.report_event_configuration.sg_descriptors, 1,
3994                 PCI_DMA_TODEVICE);
3995
3996 out:
3997         kfree(event_config);
3998
3999         return rc;
4000 }
4001
4002 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4003 {
4004         unsigned int i;
4005         struct device *dev;
4006         size_t sg_chain_buffer_length;
4007         struct pqi_io_request *io_request;
4008
4009         if (!ctrl_info->io_request_pool)
4010                 return;
4011
4012         dev = &ctrl_info->pci_dev->dev;
4013         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4014         io_request = ctrl_info->io_request_pool;
4015
4016         for (i = 0; i < ctrl_info->max_io_slots; i++) {
4017                 kfree(io_request->iu);
4018                 if (!io_request->sg_chain_buffer)
4019                         break;
4020                 dma_free_coherent(dev, sg_chain_buffer_length,
4021                         io_request->sg_chain_buffer,
4022                         io_request->sg_chain_buffer_dma_handle);
4023                 io_request++;
4024         }
4025
4026         kfree(ctrl_info->io_request_pool);
4027         ctrl_info->io_request_pool = NULL;
4028 }
4029
4030 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4031 {
4032         ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4033                 ctrl_info->error_buffer_length,
4034                 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4035
4036         if (!ctrl_info->error_buffer)
4037                 return -ENOMEM;
4038
4039         return 0;
4040 }
4041
4042 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4043 {
4044         unsigned int i;
4045         void *sg_chain_buffer;
4046         size_t sg_chain_buffer_length;
4047         dma_addr_t sg_chain_buffer_dma_handle;
4048         struct device *dev;
4049         struct pqi_io_request *io_request;
4050
4051         ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4052                 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4053
4054         if (!ctrl_info->io_request_pool) {
4055                 dev_err(&ctrl_info->pci_dev->dev,
4056                         "failed to allocate I/O request pool\n");
4057                 goto error;
4058         }
4059
4060         dev = &ctrl_info->pci_dev->dev;
4061         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4062         io_request = ctrl_info->io_request_pool;
4063
4064         for (i = 0; i < ctrl_info->max_io_slots; i++) {
4065                 io_request->iu =
4066                         kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4067
4068                 if (!io_request->iu) {
4069                         dev_err(&ctrl_info->pci_dev->dev,
4070                                 "failed to allocate IU buffers\n");
4071                         goto error;
4072                 }
4073
4074                 sg_chain_buffer = dma_alloc_coherent(dev,
4075                         sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4076                         GFP_KERNEL);
4077
4078                 if (!sg_chain_buffer) {
4079                         dev_err(&ctrl_info->pci_dev->dev,
4080                                 "failed to allocate PQI scatter-gather chain buffers\n");
4081                         goto error;
4082                 }
4083
4084                 io_request->index = i;
4085                 io_request->sg_chain_buffer = sg_chain_buffer;
4086                 io_request->sg_chain_buffer_dma_handle =
4087                         sg_chain_buffer_dma_handle;
4088                 io_request++;
4089         }
4090
4091         return 0;
4092
4093 error:
4094         pqi_free_all_io_requests(ctrl_info);
4095
4096         return -ENOMEM;
4097 }
4098
4099 /*
4100  * Calculate required resources that are sized based on max. outstanding
4101  * requests and max. transfer size.
4102  */
4103
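     /*
      * The scatter-gather limits follow from the transfer size: one descriptor
      * per page of the largest transfer, plus one in case the buffer is not
      * page-aligned, clamped to what the controller supports, after which the
      * transfer size is recomputed from the final descriptor count so the two
      * limits stay consistent.  Purely as an illustration (the real numbers
      * depend on the controller-reported limits): a 1 MiB cap with 4 KiB pages
      * gives 256 + 1 = 257 descriptors, a recomputed transfer size of
      * 256 * 4 KiB = 1 MiB, and max_sectors = 2048.
      */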
4104 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4105 {
4106         u32 max_transfer_size;
4107         u32 max_sg_entries;
4108
4109         ctrl_info->scsi_ml_can_queue =
4110                 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4111         ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4112
4113         ctrl_info->error_buffer_length =
4114                 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4115
4116         max_transfer_size =
4117                 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4118
4119         max_sg_entries = max_transfer_size / PAGE_SIZE;
4120
4121         /* +1 to cover when the buffer is not page-aligned. */
4122         max_sg_entries++;
4123
4124         max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4125
4126         max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4127
4128         ctrl_info->sg_chain_buffer_length =
4129                 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4130         ctrl_info->sg_tablesize = max_sg_entries;
4131         ctrl_info->max_sectors = max_transfer_size / 512;
4132 }
4133
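     /*
      * Choose the number of queue groups and the per-queue depths.  Each group
      * consumes two inbound queues (RAID and AIO paths) and one outbound
      * queue, and one extra outbound queue is needed for events, hence the
      * upper bound of min(max_inbound_queues / 2, max_outbound_queues - 1);
      * the count is further capped by PQI_MAX_QUEUE_GROUPS, the number of
      * online CPUs and the controller's MSI-X vector limit.  The inbound depth
      * is just enough to hold one IU of the maximum (spanning-rounded) length
      * plus the one ring element that is always unusable, clamped to the
      * controller's limit, and the outbound queue is roughly twice as deep,
      * matching the two inbound queues that complete into it.
      */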
4134 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4135 {
4136         int num_cpus;
4137         int max_queue_groups;
4138         int num_queue_groups;
4139         u16 num_elements_per_iq;
4140         u16 num_elements_per_oq;
4141
4142         max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4143                 ctrl_info->max_outbound_queues - 1);
4144         max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4145
4146         num_cpus = num_online_cpus();
4147         num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4148         num_queue_groups = min(num_queue_groups, max_queue_groups);
4149
4150         ctrl_info->num_queue_groups = num_queue_groups;
4151
4152         /*
4153          * Make sure that the max. inbound IU length is an even multiple
4154          * of our inbound element length.
4155          */
4156         ctrl_info->max_inbound_iu_length =
4157                 (ctrl_info->max_inbound_iu_length_per_firmware /
4158                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4159                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4160
4161         num_elements_per_iq =
4162                 (ctrl_info->max_inbound_iu_length /
4163                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4164
4165         /* Add one because one element in each queue is unusable. */
4166         num_elements_per_iq++;
4167
4168         num_elements_per_iq = min(num_elements_per_iq,
4169                 ctrl_info->max_elements_per_iq);
4170
4171         num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4172         num_elements_per_oq = min(num_elements_per_oq,
4173                 ctrl_info->max_elements_per_oq);
4174
4175         ctrl_info->num_elements_per_iq = num_elements_per_iq;
4176         ctrl_info->num_elements_per_oq = num_elements_per_oq;
4177
4178         ctrl_info->max_sg_per_iu =
4179                 ((ctrl_info->max_inbound_iu_length -
4180                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4181                 sizeof(struct pqi_sg_descriptor)) +
4182                 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4183 }
4184
4185 static inline void pqi_set_sg_descriptor(
4186         struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4187 {
4188         u64 address = (u64)sg_dma_address(sg);
4189         unsigned int length = sg_dma_len(sg);
4190
4191         put_unaligned_le64(address, &sg_descriptor->address);
4192         put_unaligned_le32(length, &sg_descriptor->length);
4193         put_unaligned_le32(0, &sg_descriptor->flags);
4194 }
4195
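/*
 * Build the scatter-gather list for a RAID path request.  Descriptors
 * are embedded directly in the IU; if they will not all fit, the last
 * embedded slot is turned into a CISS_SG_CHAIN descriptor that points
 * at the request's pre-allocated chain buffer, where the remaining
 * entries continue.  The final descriptor is always marked
 * CISS_SG_LAST.
 */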
4196 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4197         struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4198         struct pqi_io_request *io_request)
4199 {
4200         int i;
4201         u16 iu_length;
4202         int sg_count;
4203         bool chained;
4204         unsigned int num_sg_in_iu;
4205         unsigned int max_sg_per_iu;
4206         struct scatterlist *sg;
4207         struct pqi_sg_descriptor *sg_descriptor;
4208
4209         sg_count = scsi_dma_map(scmd);
4210         if (sg_count < 0)
4211                 return sg_count;
4212
4213         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4214                 PQI_REQUEST_HEADER_LENGTH;
4215
4216         if (sg_count == 0)
4217                 goto out;
4218
4219         sg = scsi_sglist(scmd);
4220         sg_descriptor = request->sg_descriptors;
4221         max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4222         chained = false;
4223         num_sg_in_iu = 0;
4224         i = 0;
4225
4226         while (1) {
4227                 pqi_set_sg_descriptor(sg_descriptor, sg);
4228                 if (!chained)
4229                         num_sg_in_iu++;
4230                 i++;
4231                 if (i == sg_count)
4232                         break;
4233                 sg_descriptor++;
4234                 if (i == max_sg_per_iu) {
4235                         put_unaligned_le64(
4236                                 (u64)io_request->sg_chain_buffer_dma_handle,
4237                                 &sg_descriptor->address);
4238                         put_unaligned_le32((sg_count - num_sg_in_iu)
4239                                 * sizeof(*sg_descriptor),
4240                                 &sg_descriptor->length);
4241                         put_unaligned_le32(CISS_SG_CHAIN,
4242                                 &sg_descriptor->flags);
4243                         chained = true;
4244                         num_sg_in_iu++;
4245                         sg_descriptor = io_request->sg_chain_buffer;
4246                 }
4247                 sg = sg_next(sg);
4248         }
4249
4250         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4251         request->partial = chained;
4252         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4253
4254 out:
4255         put_unaligned_le16(iu_length, &request->header.iu_length);
4256
4257         return 0;
4258 }
4259
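/*
 * AIO path counterpart of pqi_build_raid_sg_list(): the chaining logic
 * is identical, but the number of descriptors used is also recorded in
 * the request's num_sg_descriptors field.
 */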
4260 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4261         struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4262         struct pqi_io_request *io_request)
4263 {
4264         int i;
4265         u16 iu_length;
4266         int sg_count;
4267         bool chained;
4268         unsigned int num_sg_in_iu;
4269         unsigned int max_sg_per_iu;
4270         struct scatterlist *sg;
4271         struct pqi_sg_descriptor *sg_descriptor;
4272
4273         sg_count = scsi_dma_map(scmd);
4274         if (sg_count < 0)
4275                 return sg_count;
4276
4277         iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4278                 PQI_REQUEST_HEADER_LENGTH;
4279         num_sg_in_iu = 0;
4280
4281         if (sg_count == 0)
4282                 goto out;
4283
4284         sg = scsi_sglist(scmd);
4285         sg_descriptor = request->sg_descriptors;
4286         max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4287         chained = false;
4288         i = 0;
4289
4290         while (1) {
4291                 pqi_set_sg_descriptor(sg_descriptor, sg);
4292                 if (!chained)
4293                         num_sg_in_iu++;
4294                 i++;
4295                 if (i == sg_count)
4296                         break;
4297                 sg_descriptor++;
4298                 if (i == max_sg_per_iu) {
4299                         put_unaligned_le64(
4300                                 (u64)io_request->sg_chain_buffer_dma_handle,
4301                                 &sg_descriptor->address);
4302                         put_unaligned_le32((sg_count - num_sg_in_iu)
4303                                 * sizeof(*sg_descriptor),
4304                                 &sg_descriptor->length);
4305                         put_unaligned_le32(CISS_SG_CHAIN,
4306                                 &sg_descriptor->flags);
4307                         chained = true;
4308                         num_sg_in_iu++;
4309                         sg_descriptor = io_request->sg_chain_buffer;
4310                 }
4311                 sg = sg_next(sg);
4312         }
4313
4314         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4315         request->partial = chained;
4316         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4317
4318 out:
4319         put_unaligned_le16(iu_length, &request->header.iu_length);
4320         request->num_sg_descriptors = num_sg_in_iu;
4321
4322         return 0;
4323 }
4324
4325 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4326         void *context)
4327 {
4328         struct scsi_cmnd *scmd;
4329
4330         scmd = io_request->scmd;
4331         pqi_free_io_request(io_request);
4332         scsi_dma_unmap(scmd);
4333         pqi_scsi_done(scmd);
4334 }
4335
4336 static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4337         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4338         struct pqi_queue_group *queue_group)
4339 {
4340         int rc;
4341         size_t cdb_length;
4342         struct pqi_io_request *io_request;
4343         struct pqi_raid_path_request *request;
4344
4345         io_request = pqi_alloc_io_request(ctrl_info);
4346         io_request->io_complete_callback = pqi_raid_io_complete;
4347         io_request->scmd = scmd;
4348
4349         scmd->host_scribble = (unsigned char *)io_request;
4350
4351         request = io_request->iu;
4352         memset(request, 0,
4353                 offsetof(struct pqi_raid_path_request, sg_descriptors));
4354
4355         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4356         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4357         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4358         put_unaligned_le16(io_request->index, &request->request_id);
4359         request->error_index = request->request_id;
4360         memcpy(request->lun_number, device->scsi3addr,
4361                 sizeof(request->lun_number));
4362
4363         cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4364         memcpy(request->cdb, scmd->cmnd, cdb_length);
4365
4366         switch (cdb_length) {
4367         case 6:
4368         case 10:
4369         case 12:
4370         case 16:
4371                 /* No bytes in the Additional CDB bytes field */
4372                 request->additional_cdb_bytes_usage =
4373                         SOP_ADDITIONAL_CDB_BYTES_0;
4374                 break;
4375         case 20:
4376                 /* 4 bytes in the Additional CDB bytes field */
4377                 request->additional_cdb_bytes_usage =
4378                         SOP_ADDITIONAL_CDB_BYTES_4;
4379                 break;
4380         case 24:
4381                 /* 8 bytes in the Additional CDB bytes field */
4382                 request->additional_cdb_bytes_usage =
4383                         SOP_ADDITIONAL_CDB_BYTES_8;
4384                 break;
4385         case 28:
4386                 /* 12 bytes in the Additional CDB bytes field */
4387                 request->additional_cdb_bytes_usage =
4388                         SOP_ADDITIONAL_CDB_BYTES_12;
4389                 break;
4390         case 32:
4391         default:
4392                 /* 16 bytes in the Additional CDB bytes field */
4393                 request->additional_cdb_bytes_usage =
4394                         SOP_ADDITIONAL_CDB_BYTES_16;
4395                 break;
4396         }
4397
4398         switch (scmd->sc_data_direction) {
4399         case DMA_TO_DEVICE:
4400                 request->data_direction = SOP_READ_FLAG;
4401                 break;
4402         case DMA_FROM_DEVICE:
4403                 request->data_direction = SOP_WRITE_FLAG;
4404                 break;
4405         case DMA_NONE:
4406                 request->data_direction = SOP_NO_DIRECTION_FLAG;
4407                 break;
4408         case DMA_BIDIRECTIONAL:
4409                 request->data_direction = SOP_BIDIRECTIONAL;
4410                 break;
4411         default:
4412                 dev_err(&ctrl_info->pci_dev->dev,
4413                         "unknown data direction: %d\n",
4414                         scmd->sc_data_direction);
4415                 WARN_ON(scmd->sc_data_direction);
4416                 break;
4417         }
4418
4419         rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4420         if (rc) {
4421                 pqi_free_io_request(io_request);
4422                 return SCSI_MLQUEUE_HOST_BUSY;
4423         }
4424
4425         pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4426
4427         return 0;
4428 }
4429
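/*
 * Completion handler for the AIO path.  A status of -EAGAIN (set
 * elsewhere in the driver when the request should be retried) is
 * reported to the midlayer as DID_IMM_RETRY so that the command is
 * resubmitted.
 */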
4430 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4431         void *context)
4432 {
4433         struct scsi_cmnd *scmd;
4434
4435         scmd = io_request->scmd;
4436         scsi_dma_unmap(scmd);
4437         if (io_request->status == -EAGAIN)
4438                 set_host_byte(scmd, DID_IMM_RETRY);
4439         pqi_free_io_request(io_request);
4440         pqi_scsi_done(scmd);
4441 }
4442
4443 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4444         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4445         struct pqi_queue_group *queue_group)
4446 {
4447         return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4448                 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4449 }
4450
4451 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4452         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4453         unsigned int cdb_length, struct pqi_queue_group *queue_group,
4454         struct pqi_encryption_info *encryption_info)
4455 {
4456         int rc;
4457         struct pqi_io_request *io_request;
4458         struct pqi_aio_path_request *request;
4459
4460         io_request = pqi_alloc_io_request(ctrl_info);
4461         io_request->io_complete_callback = pqi_aio_io_complete;
4462         io_request->scmd = scmd;
4463
4464         scmd->host_scribble = (unsigned char *)io_request;
4465
4466         request = io_request->iu;
4467         memset(request, 0,
4468                 offsetof(struct pqi_raid_path_request, sg_descriptors));
4469
4470         request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4471         put_unaligned_le32(aio_handle, &request->nexus_id);
4472         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4473         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4474         put_unaligned_le16(io_request->index, &request->request_id);
4475         request->error_index = request->request_id;
4476         if (cdb_length > sizeof(request->cdb))
4477                 cdb_length = sizeof(request->cdb);
4478         request->cdb_length = cdb_length;
4479         memcpy(request->cdb, cdb, cdb_length);
4480
4481         switch (scmd->sc_data_direction) {
4482         case DMA_TO_DEVICE:
4483                 request->data_direction = SOP_READ_FLAG;
4484                 break;
4485         case DMA_FROM_DEVICE:
4486                 request->data_direction = SOP_WRITE_FLAG;
4487                 break;
4488         case DMA_NONE:
4489                 request->data_direction = SOP_NO_DIRECTION_FLAG;
4490                 break;
4491         case DMA_BIDIRECTIONAL:
4492                 request->data_direction = SOP_BIDIRECTIONAL;
4493                 break;
4494         default:
4495                 dev_err(&ctrl_info->pci_dev->dev,
4496                         "unknown data direction: %d\n",
4497                         scmd->sc_data_direction);
4498                 WARN_ON(scmd->sc_data_direction);
4499                 break;
4500         }
4501
4502         if (encryption_info) {
4503                 request->encryption_enable = true;
4504                 put_unaligned_le16(encryption_info->data_encryption_key_index,
4505                         &request->data_encryption_key_index);
4506                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4507                         &request->encrypt_tweak_lower);
4508                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4509                         &request->encrypt_tweak_upper);
4510         }
4511
4512         rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4513         if (rc) {
4514                 pqi_free_io_request(io_request);
4515                 return SCSI_MLQUEUE_HOST_BUSY;
4516         }
4517
4518         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4519
4520         return 0;
4521 }
4522
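/*
 * Main queuecommand entry point.  The block layer's hardware queue tag
 * selects the PQI queue group.  Logical volumes are first offered to
 * the RAID bypass path when offload is enabled and the request is a
 * filesystem request, falling back to the normal RAID path otherwise;
 * physical devices use the AIO path whenever it is enabled for the
 * device.
 */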
4523 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4524         struct scsi_cmnd *scmd)
4525 {
4526         int rc;
4527         struct pqi_ctrl_info *ctrl_info;
4528         struct pqi_scsi_dev *device;
4529         u16 hwq;
4530         struct pqi_queue_group *queue_group;
4531         bool raid_bypassed;
4532
4533         device = scmd->device->hostdata;
4534         ctrl_info = shost_to_hba(shost);
4535
4536         if (pqi_ctrl_offline(ctrl_info)) {
4537                 set_host_byte(scmd, DID_NO_CONNECT);
4538                 pqi_scsi_done(scmd);
4539                 return 0;
4540         }
4541
4542         /*
4543          * This is necessary because the SCSI midlayer (SML) doesn't zero
4544          * out this field during error recovery.
4545          */
4546         scmd->result = 0;
4547
4548         hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4549         if (hwq >= ctrl_info->num_queue_groups)
4550                 hwq = 0;
4551
4552         queue_group = &ctrl_info->queue_groups[hwq];
4553
4554         if (pqi_is_logical_device(device)) {
4555                 raid_bypassed = false;
4556                 if (device->offload_enabled &&
4557                         scmd->request->cmd_type == REQ_TYPE_FS) {
4558                         rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4559                                 scmd, queue_group);
4560                         if (rc == 0 ||
4561                                 rc == SCSI_MLQUEUE_HOST_BUSY ||
4562                                 rc == SAM_STAT_CHECK_CONDITION ||
4563                                 rc == SAM_STAT_RESERVATION_CONFLICT)
4564                                 raid_bypassed = true;
4565                 }
4566                 if (!raid_bypassed)
4567                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4568                                 queue_group);
4569         } else {
4570                 if (device->aio_enabled)
4571                         rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4572                                 queue_group);
4573                 else
4574                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4575                                 queue_group);
4576         }
4577
4578         return rc;
4579 }
4580
4581 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4582         void *context)
4583 {
4584         struct completion *waiting = context;
4585
4586         complete(waiting);
4587 }
4588
4589 #define PQI_LUN_RESET_TIMEOUT_SECS      10
4590
4591 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4592         struct pqi_scsi_dev *device, struct completion *wait)
4593 {
4594         int rc;
4595         unsigned int wait_secs = 0;
4596
4597         while (1) {
4598                 if (wait_for_completion_io_timeout(wait,
4599                         PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4600                         rc = 0;
4601                         break;
4602                 }
4603
4604                 pqi_check_ctrl_health(ctrl_info);
4605                 if (pqi_ctrl_offline(ctrl_info)) {
4606                         rc = -ETIMEDOUT;
4607                         break;
4608                 }
4609
4610                 wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
4611
4612                 dev_err(&ctrl_info->pci_dev->dev,
4613                         "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
4614                         ctrl_info->scsi_host->host_no, device->bus,
4615                         device->target, device->lun, wait_secs);
4616         }
4617
4618         return rc;
4619 }
4620
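/*
 * Send a SOP LUN reset task management IU on the RAID path of the
 * default queue group and wait for it to complete, re-checking
 * controller health every PQI_LUN_RESET_TIMEOUT_SECS while waiting.
 * The lun_reset_sem limits concurrent resets to the I/O slots reserved
 * for that purpose.
 */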
4621 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4622         struct pqi_scsi_dev *device)
4623 {
4624         int rc;
4625         struct pqi_io_request *io_request;
4626         DECLARE_COMPLETION_ONSTACK(wait);
4627         struct pqi_task_management_request *request;
4628
4629         down(&ctrl_info->lun_reset_sem);
4630
4631         io_request = pqi_alloc_io_request(ctrl_info);
4632         io_request->io_complete_callback = pqi_lun_reset_complete;
4633         io_request->context = &wait;
4634
4635         request = io_request->iu;
4636         memset(request, 0, sizeof(*request));
4637
4638         request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4639         put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4640                 &request->header.iu_length);
4641         put_unaligned_le16(io_request->index, &request->request_id);
4642         memcpy(request->lun_number, device->scsi3addr,
4643                 sizeof(request->lun_number));
4644         request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4645
4646         pqi_start_io(ctrl_info,
4647                 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4648                 io_request);
4649
4650         rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4651         if (rc == 0)
4652                 rc = io_request->status;
4653
4654         pqi_free_io_request(io_request);
4655         up(&ctrl_info->lun_reset_sem);
4656
4657         return rc;
4658 }
4659
4660 /* Performs a reset at the LUN level. */
4661
4662 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4663         struct pqi_scsi_dev *device)
4664 {
4665         int rc;
4666
4667         pqi_check_ctrl_health(ctrl_info);
4668         if (pqi_ctrl_offline(ctrl_info))
4669                 return FAILED;
4670
4671         rc = pqi_lun_reset(ctrl_info, device);
4672
4673         return rc == 0 ? SUCCESS : FAILED;
4674 }
4675
4676 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4677 {
4678         int rc;
4679         struct pqi_ctrl_info *ctrl_info;
4680         struct pqi_scsi_dev *device;
4681
4682         ctrl_info = shost_to_hba(scmd->device->host);
4683         device = scmd->device->hostdata;
4684
4685         dev_err(&ctrl_info->pci_dev->dev,
4686                 "resetting scsi %d:%d:%d:%d\n",
4687                 ctrl_info->scsi_host->host_no,
4688                 device->bus, device->target, device->lun);
4689
4690         rc = pqi_device_reset(ctrl_info, device);
4691
4692         dev_err(&ctrl_info->pci_dev->dev,
4693                 "reset of scsi %d:%d:%d:%d: %s\n",
4694                 ctrl_info->scsi_host->host_no,
4695                 device->bus, device->target, device->lun,
4696                 rc == SUCCESS ? "SUCCESS" : "FAILED");
4697
4698         return rc;
4699 }
4700
4701 static int pqi_slave_alloc(struct scsi_device *sdev)
4702 {
4703         struct pqi_scsi_dev *device;
4704         unsigned long flags;
4705         struct pqi_ctrl_info *ctrl_info;
4706         struct scsi_target *starget;
4707         struct sas_rphy *rphy;
4708
4709         ctrl_info = shost_to_hba(sdev->host);
4710
4711         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4712
4713         if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4714                 starget = scsi_target(sdev);
4715                 rphy = target_to_rphy(starget);
4716                 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4717                 if (device) {
4718                         device->target = sdev_id(sdev);
4719                         device->lun = sdev->lun;
4720                         device->target_lun_valid = true;
4721                 }
4722         } else {
4723                 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4724                         sdev_id(sdev), sdev->lun);
4725         }
4726
4727         if (device && device->expose_device) {
4728                 sdev->hostdata = device;
4729                 device->sdev = sdev;
4730                 if (device->queue_depth) {
4731                         device->advertised_queue_depth = device->queue_depth;
4732                         scsi_change_queue_depth(sdev,
4733                                 device->advertised_queue_depth);
4734                 }
4735         }
4736
4737         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4738
4739         return 0;
4740 }
4741
4742 static int pqi_slave_configure(struct scsi_device *sdev)
4743 {
4744         struct pqi_scsi_dev *device;
4745
4746         device = sdev->hostdata;
4747         if (!device->expose_device)
4748                 sdev->no_uld_attach = true;
4749
4750         return 0;
4751 }
4752
4753 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4754         void __user *arg)
4755 {
4756         struct pci_dev *pci_dev;
4757         u32 subsystem_vendor;
4758         u32 subsystem_device;
4759         cciss_pci_info_struct pciinfo;
4760
4761         if (!arg)
4762                 return -EINVAL;
4763
4764         pci_dev = ctrl_info->pci_dev;
4765
4766         pciinfo.domain = pci_domain_nr(pci_dev->bus);
4767         pciinfo.bus = pci_dev->bus->number;
4768         pciinfo.dev_fn = pci_dev->devfn;
4769         subsystem_vendor = pci_dev->subsystem_vendor;
4770         subsystem_device = pci_dev->subsystem_device;
4771         pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4772                 subsystem_vendor;
4773
4774         if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4775                 return -EFAULT;
4776
4777         return 0;
4778 }
4779
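/*
 * CCISS_GETDRIVVER: report the driver version as a packed 32-bit value
 * with the major number in bits 31:28, the minor number in bits 27:24,
 * the release in bits 23:16 and the revision in bits 15:0.
 */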
4780 static int pqi_getdrivver_ioctl(void __user *arg)
4781 {
4782         u32 version;
4783
4784         if (!arg)
4785                 return -EINVAL;
4786
4787         version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4788                 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
4789
4790         if (copy_to_user(arg, &version, sizeof(version)))
4791                 return -EFAULT;
4792
4793         return 0;
4794 }
4795
4796 struct ciss_error_info {
4797         u8      scsi_status;
4798         int     command_status;
4799         size_t  sense_data_length;
4800 };
4801
4802 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4803         struct ciss_error_info *ciss_error_info)
4804 {
4805         int ciss_cmd_status;
4806         size_t sense_data_length;
4807
4808         switch (pqi_error_info->data_out_result) {
4809         case PQI_DATA_IN_OUT_GOOD:
4810                 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4811                 break;
4812         case PQI_DATA_IN_OUT_UNDERFLOW:
4813                 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4814                 break;
4815         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4816                 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4817                 break;
4818         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4819         case PQI_DATA_IN_OUT_BUFFER_ERROR:
4820         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4821         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4822         case PQI_DATA_IN_OUT_ERROR:
4823                 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4824                 break;
4825         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4826         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4827         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4828         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4829         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4830         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4831         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4832         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4833         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4834         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4835                 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4836                 break;
4837         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4838                 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4839                 break;
4840         case PQI_DATA_IN_OUT_ABORTED:
4841                 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4842                 break;
4843         case PQI_DATA_IN_OUT_TIMEOUT:
4844                 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4845                 break;
4846         default:
4847                 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4848                 break;
4849         }
4850
4851         sense_data_length =
4852                 get_unaligned_le16(&pqi_error_info->sense_data_length);
4853         if (sense_data_length == 0)
4854                 sense_data_length =
4855                 get_unaligned_le16(&pqi_error_info->response_data_length);
4856         if (sense_data_length)
4857                 if (sense_data_length > sizeof(pqi_error_info->data))
4858                         sense_data_length = sizeof(pqi_error_info->data);
4859
4860         ciss_error_info->scsi_status = pqi_error_info->status;
4861         ciss_error_info->command_status = ciss_cmd_status;
4862         ciss_error_info->sense_data_length = sense_data_length;
4863 }
4864
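/*
 * CCISS_PASSTHRU: validate and copy in the caller's
 * IOCTL_Command_struct, wrap the CDB in a RAID path request with at
 * most one data buffer mapped for DMA, submit it synchronously, and
 * translate any PQI error information back into the CISS format that
 * the ioctl interface expects.
 */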
4865 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4866 {
4867         int rc;
4868         char *kernel_buffer = NULL;
4869         u16 iu_length;
4870         size_t sense_data_length;
4871         IOCTL_Command_struct iocommand;
4872         struct pqi_raid_path_request request;
4873         struct pqi_raid_error_info pqi_error_info;
4874         struct ciss_error_info ciss_error_info;
4875
4876         if (pqi_ctrl_offline(ctrl_info))
4877                 return -ENXIO;
4878         if (!arg)
4879                 return -EINVAL;
4880         if (!capable(CAP_SYS_RAWIO))
4881                 return -EPERM;
4882         if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4883                 return -EFAULT;
4884         if (iocommand.buf_size < 1 &&
4885                 iocommand.Request.Type.Direction != XFER_NONE)
4886                 return -EINVAL;
4887         if (iocommand.Request.CDBLen > sizeof(request.cdb))
4888                 return -EINVAL;
4889         if (iocommand.Request.Type.Type != TYPE_CMD)
4890                 return -EINVAL;
4891
4892         switch (iocommand.Request.Type.Direction) {
4893         case XFER_NONE:
4894         case XFER_WRITE:
4895         case XFER_READ:
4896                 break;
4897         default:
4898                 return -EINVAL;
4899         }
4900
4901         if (iocommand.buf_size > 0) {
4902                 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4903                 if (!kernel_buffer)
4904                         return -ENOMEM;
4905                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4906                         if (copy_from_user(kernel_buffer, iocommand.buf,
4907                                 iocommand.buf_size)) {
4908                                 rc = -EFAULT;
4909                                 goto out;
4910                         }
4911                 } else {
4912                         memset(kernel_buffer, 0, iocommand.buf_size);
4913                 }
4914         }
4915
4916         memset(&request, 0, sizeof(request));
4917
4918         request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4919         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4920                 PQI_REQUEST_HEADER_LENGTH;
4921         memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4922                 sizeof(request.lun_number));
4923         memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4924         request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4925
4926         switch (iocommand.Request.Type.Direction) {
4927         case XFER_NONE:
4928                 request.data_direction = SOP_NO_DIRECTION_FLAG;
4929                 break;
4930         case XFER_WRITE:
4931                 request.data_direction = SOP_WRITE_FLAG;
4932                 break;
4933         case XFER_READ:
4934                 request.data_direction = SOP_READ_FLAG;
4935                 break;
4936         }
4937
4938         request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4939
4940         if (iocommand.buf_size > 0) {
4941                 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4942
4943                 rc = pqi_map_single(ctrl_info->pci_dev,
4944                         &request.sg_descriptors[0], kernel_buffer,
4945                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4946                 if (rc)
4947                         goto out;
4948
4949                 iu_length += sizeof(request.sg_descriptors[0]);
4950         }
4951
4952         put_unaligned_le16(iu_length, &request.header.iu_length);
4953
4954         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4955                 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4956
4957         if (iocommand.buf_size > 0)
4958                 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4959                         PCI_DMA_BIDIRECTIONAL);
4960
4961         memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4962
4963         if (rc == 0) {
4964                 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4965                 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4966                 iocommand.error_info.CommandStatus =
4967                         ciss_error_info.command_status;
4968                 sense_data_length = ciss_error_info.sense_data_length;
4969                 if (sense_data_length) {
4970                         if (sense_data_length >
4971                                 sizeof(iocommand.error_info.SenseInfo))
4972                                 sense_data_length =
4973                                         sizeof(iocommand.error_info.SenseInfo);
4974                         memcpy(iocommand.error_info.SenseInfo,
4975                                 pqi_error_info.data, sense_data_length);
4976                         iocommand.error_info.SenseLen = sense_data_length;
4977                 }
4978         }
4979
4980         if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
4981                 rc = -EFAULT;
4982                 goto out;
4983         }
4984
4985         if (rc == 0 && iocommand.buf_size > 0 &&
4986                 (iocommand.Request.Type.Direction & XFER_READ)) {
4987                 if (copy_to_user(iocommand.buf, kernel_buffer,
4988                         iocommand.buf_size)) {
4989                         rc = -EFAULT;
4990                 }
4991         }
4992
4993 out:
4994         kfree(kernel_buffer);
4995
4996         return rc;
4997 }
4998
4999 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5000 {
5001         int rc;
5002         struct pqi_ctrl_info *ctrl_info;
5003
5004         ctrl_info = shost_to_hba(sdev->host);
5005
5006         switch (cmd) {
5007         case CCISS_DEREGDISK:
5008         case CCISS_REGNEWDISK:
5009         case CCISS_REGNEWD:
5010                 rc = pqi_scan_scsi_devices(ctrl_info);
5011                 break;
5012         case CCISS_GETPCIINFO:
5013                 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5014                 break;
5015         case CCISS_GETDRIVVER:
5016                 rc = pqi_getdrivver_ioctl(arg);
5017                 break;
5018         case CCISS_PASSTHRU:
5019                 rc = pqi_passthru_ioctl(ctrl_info, arg);
5020                 break;
5021         default:
5022                 rc = -EINVAL;
5023                 break;
5024         }
5025
5026         return rc;
5027 }
5028
5029 static ssize_t pqi_version_show(struct device *dev,
5030         struct device_attribute *attr, char *buffer)
5031 {
5032         ssize_t count = 0;
5033         struct Scsi_Host *shost;
5034         struct pqi_ctrl_info *ctrl_info;
5035
5036         shost = class_to_shost(dev);
5037         ctrl_info = shost_to_hba(shost);
5038
5039         count += snprintf(buffer + count, PAGE_SIZE - count,
5040                 "  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5041
5042         count += snprintf(buffer + count, PAGE_SIZE - count,
5043                 "firmware: %s\n", ctrl_info->firmware_version);
5044
5045         return count;
5046 }
5047
5048 static ssize_t pqi_host_rescan_store(struct device *dev,
5049         struct device_attribute *attr, const char *buffer, size_t count)
5050 {
5051         struct Scsi_Host *shost = class_to_shost(dev);
5052
5053         pqi_scan_start(shost);
5054
5055         return count;
5056 }
5057
5058 static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5059 static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5060
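/*
 * Host attributes; these appear as "version" and "rescan" entries in
 * the controller's scsi_host sysfs directory.
 */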
5061 static struct device_attribute *pqi_shost_attrs[] = {
5062         &dev_attr_version,
5063         &dev_attr_rescan,
5064         NULL
5065 };
5066
5067 static ssize_t pqi_sas_address_show(struct device *dev,
5068         struct device_attribute *attr, char *buffer)
5069 {
5070         struct pqi_ctrl_info *ctrl_info;
5071         struct scsi_device *sdev;
5072         struct pqi_scsi_dev *device;
5073         unsigned long flags;
5074         u64 sas_address;
5075
5076         sdev = to_scsi_device(dev);
5077         ctrl_info = shost_to_hba(sdev->host);
5078
5079         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5080
5081         device = sdev->hostdata;
5082         if (pqi_is_logical_device(device)) {
5083                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5084                         flags);
5085                 return -ENODEV;
5086         }
5087         sas_address = device->sas_address;
5088
5089         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5090
5091         return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5092 }
5093
5094 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5095         struct device_attribute *attr, char *buffer)
5096 {
5097         struct pqi_ctrl_info *ctrl_info;
5098         struct scsi_device *sdev;
5099         struct pqi_scsi_dev *device;
5100         unsigned long flags;
5101
5102         sdev = to_scsi_device(dev);
5103         ctrl_info = shost_to_hba(sdev->host);
5104
5105         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5106
5107         device = sdev->hostdata;
5108         buffer[0] = device->offload_enabled ? '1' : '0';
5109         buffer[1] = '\n';
5110         buffer[2] = '\0';
5111
5112         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5113
5114         return 2;
5115 }
5116
5117 static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5118 static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5119         pqi_ssd_smart_path_enabled_show, NULL);
5120
5121 static struct device_attribute *pqi_sdev_attrs[] = {
5122         &dev_attr_sas_address,
5123         &dev_attr_ssd_smart_path_enabled,
5124         NULL
5125 };
5126
5127 static struct scsi_host_template pqi_driver_template = {
5128         .module = THIS_MODULE,
5129         .name = DRIVER_NAME_SHORT,
5130         .proc_name = DRIVER_NAME_SHORT,
5131         .queuecommand = pqi_scsi_queue_command,
5132         .scan_start = pqi_scan_start,
5133         .scan_finished = pqi_scan_finished,
5134         .this_id = -1,
5135         .use_clustering = ENABLE_CLUSTERING,
5136         .eh_device_reset_handler = pqi_eh_device_reset_handler,
5137         .ioctl = pqi_ioctl,
5138         .slave_alloc = pqi_slave_alloc,
5139         .slave_configure = pqi_slave_configure,
5140         .sdev_attrs = pqi_sdev_attrs,
5141         .shost_attrs = pqi_shost_attrs,
5142 };
5143
5144 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5145 {
5146         int rc;
5147         struct Scsi_Host *shost;
5148
5149         shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5150         if (!shost) {
5151                 dev_err(&ctrl_info->pci_dev->dev,
5152                         "scsi_host_alloc failed for controller %u\n",
5153                         ctrl_info->ctrl_id);
5154                 return -ENOMEM;
5155         }
5156
5157         shost->io_port = 0;
5158         shost->n_io_port = 0;
5159         shost->this_id = -1;
5160         shost->max_channel = PQI_MAX_BUS;
5161         shost->max_cmd_len = MAX_COMMAND_SIZE;
5162         shost->max_lun = ~0;
5163         shost->max_id = ~0;
5164         shost->max_sectors = ctrl_info->max_sectors;
5165         shost->can_queue = ctrl_info->scsi_ml_can_queue;
5166         shost->cmd_per_lun = shost->can_queue;
5167         shost->sg_tablesize = ctrl_info->sg_tablesize;
5168         shost->transportt = pqi_sas_transport_template;
5169         shost->irq = ctrl_info->msix_vectors[0];
5170         shost->unique_id = shost->irq;
5171         shost->nr_hw_queues = ctrl_info->num_queue_groups;
5172         shost->hostdata[0] = (unsigned long)ctrl_info;
5173
5174         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5175         if (rc) {
5176                 dev_err(&ctrl_info->pci_dev->dev,
5177                         "scsi_add_host failed for controller %u\n",
5178                         ctrl_info->ctrl_id);
5179                 goto free_host;
5180         }
5181
5182         rc = pqi_add_sas_host(shost, ctrl_info);
5183         if (rc) {
5184                 dev_err(&ctrl_info->pci_dev->dev,
5185                         "add SAS host failed for controller %u\n",
5186                         ctrl_info->ctrl_id);
5187                 goto remove_host;
5188         }
5189
5190         ctrl_info->scsi_host = shost;
5191
5192         return 0;
5193
5194 remove_host:
5195         scsi_remove_host(shost);
5196 free_host:
5197         scsi_host_put(shost);
5198
5199         return rc;
5200 }
5201
5202 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5203 {
5204         struct Scsi_Host *shost;
5205
5206         pqi_delete_sas_host(ctrl_info);
5207
5208         shost = ctrl_info->scsi_host;
5209         if (!shost)
5210                 return;
5211
5212         scsi_remove_host(shost);
5213         scsi_host_put(shost);
5214 }
5215
5216 #define PQI_RESET_ACTION_RESET          0x1
5217
5218 #define PQI_RESET_TYPE_NO_RESET         0x0
5219 #define PQI_RESET_TYPE_SOFT_RESET       0x1
5220 #define PQI_RESET_TYPE_FIRM_RESET       0x2
5221 #define PQI_RESET_TYPE_HARD_RESET       0x3
5222
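/*
 * pqi_reset() writes the reset action shifted up by five bits, OR'd
 * with one of the reset types above, into the device_reset register and
 * then waits for the controller to report that PQI mode is ready again.
 */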
5223 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5224 {
5225         int rc;
5226         u32 reset_params;
5227
5228         reset_params = (PQI_RESET_ACTION_RESET << 5) |
5229                 PQI_RESET_TYPE_HARD_RESET;
5230
5231         writel(reset_params,
5232                 &ctrl_info->pqi_registers->device_reset);
5233
5234         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5235         if (rc)
5236                 dev_err(&ctrl_info->pci_dev->dev,
5237                         "PQI reset failed\n");
5238
5239         return rc;
5240 }
5241
5242 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5243 {
5244         int rc;
5245         struct bmic_identify_controller *identify;
5246
5247         identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5248         if (!identify)
5249                 return -ENOMEM;
5250
5251         rc = pqi_identify_controller(ctrl_info, identify);
5252         if (rc)
5253                 goto out;
5254
5255         memcpy(ctrl_info->firmware_version, identify->firmware_version,
5256                 sizeof(identify->firmware_version));
5257         ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5258         snprintf(ctrl_info->firmware_version +
5259                 strlen(ctrl_info->firmware_version),
5260                 sizeof(ctrl_info->firmware_version) - sizeof(identify->firmware_version),
5261                 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5262
5263 out:
5264         kfree(identify);
5265
5266         return rc;
5267 }
5268
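/*
 * kdump/reset_devices path: the controller may have been left in PQI
 * mode by the crashed kernel, so make sure the firmware is still
 * running and, if necessary, reset the controller back into legacy SIS
 * mode before normal initialization begins.
 */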
5269 static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
5270 {
5271         if (!sis_is_firmware_running(ctrl_info))
5272                 return -ENXIO;
5273
5274         if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
5275                 sis_disable_msix(ctrl_info);
5276                 if (pqi_reset(ctrl_info) == 0)
5277                         sis_reenable_sis_mode(ctrl_info);
5278         }
5279
5280         return 0;
5281 }
5282
5283 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5284 {
5285         int rc;
5286
5287         if (reset_devices) {
5288                 rc = pqi_kdump_init(ctrl_info);
5289                 if (rc)
5290                         return rc;
5291         }
5292
5293         /*
5294          * When the controller comes out of reset, it is always running
5295          * in legacy SIS mode.  This is so that it remains compatible with
5296          * the legacy drivers shipped with operating systems.  So we have
5297          * to talk to it using SIS commands at first.  Once we are satisfied
5298          * that the controller supports PQI, we transition it into PQI
5299          * mode.
5300          */
5301
5302         /*
5303          * Wait until the controller is ready to start accepting SIS
5304          * commands.
5305          */
5306         rc = sis_wait_for_ctrl_ready(ctrl_info);
5307         if (rc) {
5308                 dev_err(&ctrl_info->pci_dev->dev,
5309                         "error initializing SIS interface\n");
5310                 return rc;
5311         }
5312
5313         /*
5314          * Get the controller properties.  This allows us to determine
5315          * whether or not it supports PQI mode.
5316          */
5317         rc = sis_get_ctrl_properties(ctrl_info);
5318         if (rc) {
5319                 dev_err(&ctrl_info->pci_dev->dev,
5320                         "error obtaining controller properties\n");
5321                 return rc;
5322         }
5323
5324         rc = sis_get_pqi_capabilities(ctrl_info);
5325         if (rc) {
5326                 dev_err(&ctrl_info->pci_dev->dev,
5327                         "error obtaining controller capabilities\n");
5328                 return rc;
5329         }
5330
5331         if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5332                 ctrl_info->max_outstanding_requests =
5333                         PQI_MAX_OUTSTANDING_REQUESTS;
5334
5335         pqi_calculate_io_resources(ctrl_info);
5336
5337         rc = pqi_alloc_error_buffer(ctrl_info);
5338         if (rc) {
5339                 dev_err(&ctrl_info->pci_dev->dev,
5340                         "failed to allocate PQI error buffer\n");
5341                 return rc;
5342         }
5343
5344         /*
5345          * If the function we are about to call succeeds, the
5346          * controller will transition from legacy SIS mode
5347          * into PQI mode.
5348          */
5349         rc = sis_init_base_struct_addr(ctrl_info);
5350         if (rc) {
5351                 dev_err(&ctrl_info->pci_dev->dev,
5352                         "error initializing PQI mode\n");
5353                 return rc;
5354         }
5355
5356         /* Wait for the controller to complete the SIS -> PQI transition. */
5357         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5358         if (rc) {
5359                 dev_err(&ctrl_info->pci_dev->dev,
5360                         "transition to PQI mode failed\n");
5361                 return rc;
5362         }
5363
5364         /* From here on, we are running in PQI mode. */
5365         ctrl_info->pqi_mode_enabled = true;
5366         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5367
5368         rc = pqi_alloc_admin_queues(ctrl_info);
5369         if (rc) {
5370                 dev_err(&ctrl_info->pci_dev->dev,
5371                         "error allocating admin queues\n");
5372                 return rc;
5373         }
5374
5375         rc = pqi_create_admin_queues(ctrl_info);
5376         if (rc) {
5377                 dev_err(&ctrl_info->pci_dev->dev,
5378                         "error creating admin queues\n");
5379                 return rc;
5380         }
5381
5382         rc = pqi_report_device_capability(ctrl_info);
5383         if (rc) {
5384                 dev_err(&ctrl_info->pci_dev->dev,
5385                         "obtaining device capability failed\n");
5386                 return rc;
5387         }
5388
5389         rc = pqi_validate_device_capability(ctrl_info);
5390         if (rc)
5391                 return rc;
5392
5393         pqi_calculate_queue_resources(ctrl_info);
5394
5395         rc = pqi_enable_msix_interrupts(ctrl_info);
5396         if (rc)
5397                 return rc;
5398
5399         if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5400                 ctrl_info->max_msix_vectors =
5401                         ctrl_info->num_msix_vectors_enabled;
5402                 pqi_calculate_queue_resources(ctrl_info);
5403         }
5404
5405         rc = pqi_alloc_io_resources(ctrl_info);
5406         if (rc)
5407                 return rc;
5408
5409         rc = pqi_alloc_operational_queues(ctrl_info);
5410         if (rc)
5411                 return rc;
5412
5413         pqi_init_operational_queues(ctrl_info);
5414
5415         rc = pqi_request_irqs(ctrl_info);
5416         if (rc)
5417                 return rc;
5418
5419         pqi_irq_set_affinity_hint(ctrl_info);
5420
5421         rc = pqi_create_queues(ctrl_info);
5422         if (rc)
5423                 return rc;
5424
5425         sis_enable_msix(ctrl_info);
5426
5427         rc = pqi_configure_events(ctrl_info);
5428         if (rc) {
5429                 dev_err(&ctrl_info->pci_dev->dev,
5430                         "error configuring events\n");
5431                 return rc;
5432         }
5433
5434         pqi_start_heartbeat_timer(ctrl_info);
5435
5436         ctrl_info->controller_online = true;
5437
5438         /* Register with the SCSI subsystem. */
5439         rc = pqi_register_scsi(ctrl_info);
5440         if (rc)
5441                 return rc;
5442
5443         rc = pqi_get_ctrl_firmware_version(ctrl_info);
5444         if (rc) {
5445                 dev_err(&ctrl_info->pci_dev->dev,
5446                         "error obtaining firmware version\n");
5447                 return rc;
5448         }
5449
5450         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5451         if (rc) {
5452                 dev_err(&ctrl_info->pci_dev->dev,
5453                         "error updating host wellness\n");
5454                 return rc;
5455         }
5456
5457         pqi_schedule_update_time_worker(ctrl_info);
5458
5459         pqi_scan_scsi_devices(ctrl_info);
5460
5461         return 0;
5462 }
5463
5464 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5465 {
5466         int rc;
5467         u64 mask;
5468
5469         rc = pci_enable_device(ctrl_info->pci_dev);
5470         if (rc) {
5471                 dev_err(&ctrl_info->pci_dev->dev,
5472                         "failed to enable PCI device\n");
5473                 return rc;
5474         }
5475
5476         if (sizeof(dma_addr_t) > 4)
5477                 mask = DMA_BIT_MASK(64);
5478         else
5479                 mask = DMA_BIT_MASK(32);
5480
5481         rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
5482         if (rc) {
5483                 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5484                 goto disable_device;
5485         }
5486
5487         rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5488         if (rc) {
5489                 dev_err(&ctrl_info->pci_dev->dev,
5490                         "failed to obtain PCI resources\n");
5491                 goto disable_device;
5492         }
5493
5494         ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5495                 ctrl_info->pci_dev, 0),
5496                 sizeof(struct pqi_ctrl_registers));
5497         if (!ctrl_info->iomem_base) {
5498                 dev_err(&ctrl_info->pci_dev->dev,
5499                         "failed to map memory for controller registers\n");
5500                 rc = -ENOMEM;
5501                 goto release_regions;
5502         }
5503
5504         ctrl_info->registers = ctrl_info->iomem_base;
5505         ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5506
5507         /* Enable bus mastering. */
5508         pci_set_master(ctrl_info->pci_dev);
5509
5510         pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5511
5512         return 0;
5513
5514 release_regions:
5515         pci_release_regions(ctrl_info->pci_dev);
5516 disable_device:
5517         pci_disable_device(ctrl_info->pci_dev);
5518
5519         return rc;
5520 }
5521
5522 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5523 {
5524         iounmap(ctrl_info->iomem_base);
5525         pci_release_regions(ctrl_info->pci_dev);
5526         pci_disable_device(ctrl_info->pci_dev);
5527         pci_set_drvdata(ctrl_info->pci_dev, NULL);
5528 }
5529
5530 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5531 {
5532         struct pqi_ctrl_info *ctrl_info;
5533
5534         ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5535                         GFP_KERNEL, numa_node);
5536         if (!ctrl_info)
5537                 return NULL;
5538
5539         mutex_init(&ctrl_info->scan_mutex);
5540
5541         INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5542         spin_lock_init(&ctrl_info->scsi_device_list_lock);
5543
5544         INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5545         atomic_set(&ctrl_info->num_interrupts, 0);
5546
5547         INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5548         INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5549
5550         sema_init(&ctrl_info->sync_request_sem,
5551                 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5552         sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5553
5554         ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5555         ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5556
5557         return ctrl_info;
5558 }
5559
5560 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5561 {
5562         kfree(ctrl_info);
5563 }
5564
5565 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5566 {
5567         pqi_irq_unset_affinity_hint(ctrl_info);
5568         pqi_free_irqs(ctrl_info);
5569         if (ctrl_info->num_msix_vectors_enabled)
5570                 pci_disable_msix(ctrl_info->pci_dev);
5571 }
5572
5573 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5574 {
5575         pqi_stop_heartbeat_timer(ctrl_info);
5576         pqi_free_interrupts(ctrl_info);
5577         if (ctrl_info->queue_memory_base)
5578                 dma_free_coherent(&ctrl_info->pci_dev->dev,
5579                         ctrl_info->queue_memory_length,
5580                         ctrl_info->queue_memory_base,
5581                         ctrl_info->queue_memory_base_dma_handle);
5582         if (ctrl_info->admin_queue_memory_base)
5583                 dma_free_coherent(&ctrl_info->pci_dev->dev,
5584                         ctrl_info->admin_queue_memory_length,
5585                         ctrl_info->admin_queue_memory_base,
5586                         ctrl_info->admin_queue_memory_base_dma_handle);
5587         pqi_free_all_io_requests(ctrl_info);
5588         if (ctrl_info->error_buffer)
5589                 dma_free_coherent(&ctrl_info->pci_dev->dev,
5590                         ctrl_info->error_buffer_length,
5591                         ctrl_info->error_buffer,
5592                         ctrl_info->error_buffer_dma_handle);
5593         if (ctrl_info->iomem_base)
5594                 pqi_cleanup_pci_init(ctrl_info);
5595         pqi_free_ctrl_info(ctrl_info);
5596 }
5597
5598 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5599 {
5600         cancel_delayed_work_sync(&ctrl_info->rescan_work);
5601         cancel_delayed_work_sync(&ctrl_info->update_time_work);
5602         pqi_remove_all_scsi_devices(ctrl_info);
5603         pqi_unregister_scsi(ctrl_info);
5604
5605         if (ctrl_info->pqi_mode_enabled) {
5606                 sis_disable_msix(ctrl_info);
5607                 if (pqi_reset(ctrl_info) == 0)
5608                         sis_reenable_sis_mode(ctrl_info);
5609         }
5610         pqi_free_ctrl_resources(ctrl_info);
5611 }
5612
5613 static void pqi_print_ctrl_info(struct pci_dev *pdev,
5614         const struct pci_device_id *id)
5615 {
5616         char *ctrl_description;
5617
5618         if (id->driver_data) {
5619                 ctrl_description = (char *)id->driver_data;
5620         } else {
5621                 switch (id->subvendor) {
5622                 case PCI_VENDOR_ID_HP:
5623                         ctrl_description = hpe_branded_controller;
5624                         break;
5625                 case PCI_VENDOR_ID_ADAPTEC2:
5626                 default:
5627                         ctrl_description = microsemi_branded_controller;
5628                         break;
5629                 }
5630         }
5631
5632         dev_info(&pdev->dev, "%s found\n", ctrl_description);
5633 }
5634
5635 static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5636 {
5637         int rc;
5638         int node;
5639         struct pqi_ctrl_info *ctrl_info;
5640
5641         pqi_print_ctrl_info(pdev, id);
5642
5643         if (pqi_disable_device_id_wildcards &&
5644                 id->subvendor == PCI_ANY_ID &&
5645                 id->subdevice == PCI_ANY_ID) {
5646                 dev_warn(&pdev->dev,
5647                         "controller not probed because device ID wildcards are disabled\n");
5648                 return -ENODEV;
5649         }
5650
5651         if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5652                 dev_warn(&pdev->dev,
5653                         "controller device ID matched using wildcards\n");
5654
5655         node = dev_to_node(&pdev->dev);
5656         if (node == NUMA_NO_NODE)
5657                 set_dev_node(&pdev->dev, 0);
5658
5659         ctrl_info = pqi_alloc_ctrl_info(node);
5660         if (!ctrl_info) {
5661                 dev_err(&pdev->dev,
5662                         "failed to allocate controller info block\n");
5663                 return -ENOMEM;
5664         }
5665
5666         ctrl_info->pci_dev = pdev;
5667
5668         rc = pqi_pci_init(ctrl_info);
5669         if (rc)
5670                 goto error;
5671
5672         rc = pqi_ctrl_init(ctrl_info);
5673         if (rc)
5674                 goto error;
5675
5676         return 0;
5677
5678 error:
5679         pqi_remove_ctrl(ctrl_info);
5680
5681         return rc;
5682 }
5683
5684 static void pqi_pci_remove(struct pci_dev *pdev)
5685 {
5686         struct pqi_ctrl_info *ctrl_info;
5687
5688         ctrl_info = pci_get_drvdata(pdev);
5689         if (!ctrl_info)
5690                 return;
5691
5692         pqi_remove_ctrl(ctrl_info);
5693 }
5694
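/*
 * PCI shutdown hook: ask the controller to commit its battery-backed write
 * cache to storage before the system powers off or reboots.  Because the
 * error label is also the target of the missing-drvdata check, the same
 * "unable to flush" warning is emitted in that case as well.
 */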
5695 static void pqi_shutdown(struct pci_dev *pdev)
5696 {
5697         int rc;
5698         struct pqi_ctrl_info *ctrl_info;
5699
5700         ctrl_info = pci_get_drvdata(pdev);
5701         if (!ctrl_info)
5702                 goto error;
5703
5704         /*
5705          * Write all data in the controller's battery-backed cache to
5706          * storage.
5707          */
5708         rc = pqi_flush_cache(ctrl_info);
5709         if (rc == 0)
5710                 return;
5711
5712 error:
5713         dev_warn(&pdev->dev,
5714                 "unable to flush controller cache\n");
5715 }
5716
5717 /* Define the PCI IDs for the controllers that we support. */
5718 static const struct pci_device_id pqi_pci_id_table[] = {
5719         {
5720                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5721                                PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5722         },
5723         {
5724                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5725                                PCI_VENDOR_ID_HP, 0x0600)
5726         },
5727         {
5728                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5729                                PCI_VENDOR_ID_HP, 0x0601)
5730         },
5731         {
5732                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5733                                PCI_VENDOR_ID_HP, 0x0602)
5734         },
5735         {
5736                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5737                                PCI_VENDOR_ID_HP, 0x0603)
5738         },
5739         {
5740                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5741                                PCI_VENDOR_ID_HP, 0x0650)
5742         },
5743         {
5744                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5745                                PCI_VENDOR_ID_HP, 0x0651)
5746         },
5747         {
5748                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5749                                PCI_VENDOR_ID_HP, 0x0652)
5750         },
5751         {
5752                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5753                                PCI_VENDOR_ID_HP, 0x0653)
5754         },
5755         {
5756                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5757                                PCI_VENDOR_ID_HP, 0x0654)
5758         },
5759         {
5760                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5761                                PCI_VENDOR_ID_HP, 0x0655)
5762         },
5763         {
5764                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5765                                PCI_VENDOR_ID_HP, 0x0700)
5766         },
5767         {
5768                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5769                                PCI_VENDOR_ID_HP, 0x0701)
5770         },
5771         {
5772                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5773                                PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5774         },
5775         {
5776                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5777                                PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5778         },
5779         {
5780                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5781                                PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5782         },
5783         {
5784                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5785                                PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5786         },
5787         {
5788                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5789                                PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5790         },
5791         {
5792                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5793                                PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5794         },
5795         {
5796                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5797                                PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5798         },
5799         {
5800                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5801                                PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5802         },
5803         {
5804                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5805                                PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5806         },
5807         {
5808                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5809                                PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5810         },
5811         {
5812                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5813                                PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5814         },
5815         {
5816                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5817                                PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5818         },
5819         {
5820                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5821                                PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5822         },
5823         {
5824                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5825                                PCI_VENDOR_ID_HP, 0x1001)
5826         },
5827         {
5828                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5829                                PCI_VENDOR_ID_HP, 0x1100)
5830         },
5831         {
5832                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5833                                PCI_VENDOR_ID_HP, 0x1101)
5834         },
5835         {
5836                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5837                                PCI_VENDOR_ID_HP, 0x1102)
5838         },
5839         {
5840                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5841                                PCI_VENDOR_ID_HP, 0x1150)
5842         },
5843         {
5844                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5845                                PCI_ANY_ID, PCI_ANY_ID)
5846         },
5847         { 0 }
5848 };
5849
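/*
 * MODULE_DEVICE_TABLE() below exports the ID list so userspace module
 * loaders can bind matching devices to this driver.  The final PCI_ANY_ID
 * entry is a catch-all for any 0x028f function regardless of subsystem IDs;
 * probes matched only through it can be refused with the
 * disable_device_id_wildcards module parameter.  A newly supported board
 * would get one more entry ahead of the wildcard, along the lines of
 * (the subsystem device ID below is purely illustrative):
 *
 *	{
 *		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 *			       PCI_VENDOR_ID_ADAPTEC2, 0x1234)
 *	},
 */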
5850 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
5851
5852 static struct pci_driver pqi_pci_driver = {
5853         .name = DRIVER_NAME_SHORT,
5854         .id_table = pqi_pci_id_table,
5855         .probe = pqi_pci_probe,
5856         .remove = pqi_pci_remove,
5857         .shutdown = pqi_shutdown,
5858 };
5859
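/*
 * Module init: announce the driver, attach the SAS transport template
 * (pqi_sas_transport_functions), which the driver's SAS port/PHY support
 * relies on, then register the PCI driver.  The transport template is
 * released again if PCI registration fails.
 */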
5860 static int __init pqi_init(void)
5861 {
5862         int rc;
5863
5864         pr_info(DRIVER_NAME "\n");
5865
5866         pqi_sas_transport_template =
5867                 sas_attach_transport(&pqi_sas_transport_functions);
5868         if (!pqi_sas_transport_template)
5869                 return -ENODEV;
5870
5871         rc = pci_register_driver(&pqi_pci_driver);
5872         if (rc)
5873                 sas_release_transport(pqi_sas_transport_template);
5874
5875         return rc;
5876 }
5877
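/*
 * Module exit: unregister the PCI driver (removing every bound controller)
 * and release the SAS transport template.
 */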
5878 static void __exit pqi_cleanup(void)
5879 {
5880         pci_unregister_driver(&pqi_pci_driver);
5881         sas_release_transport(pqi_sas_transport_template);
5882 }
5883
5884 module_init(pqi_init);
5885 module_exit(pqi_cleanup);
5886
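/*
 * Compile-time layout checks.  This function is never called; it exists so
 * the BUILD_BUG_ON()s below break the build if any structure shared with the
 * controller firmware drifts from the offsets and sizes expected by the
 * PQI/SIS/BMIC interfaces.
 */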
5887 static void __attribute__((unused)) verify_structures(void)
5888 {
5889         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5890                 sis_host_to_ctrl_doorbell) != 0x20);
5891         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5892                 sis_interrupt_mask) != 0x34);
5893         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5894                 sis_ctrl_to_host_doorbell) != 0x9c);
5895         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5896                 sis_ctrl_to_host_doorbell_clear) != 0xa0);
5897         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5898                 sis_driver_scratch) != 0xb0);
5899         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5900                 sis_firmware_status) != 0xbc);
5901         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5902                 sis_mailbox) != 0x1000);
5903         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5904                 pqi_registers) != 0x4000);
5905
5906         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5907                 iu_type) != 0x0);
5908         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5909                 iu_length) != 0x2);
5910         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5911                 response_queue_id) != 0x4);
5912         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5913                 work_area) != 0x6);
5914         BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5915
5916         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5917                 status) != 0x0);
5918         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5919                 service_response) != 0x1);
5920         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5921                 data_present) != 0x2);
5922         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5923                 reserved) != 0x3);
5924         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5925                 residual_count) != 0x4);
5926         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5927                 data_length) != 0x8);
5928         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5929                 reserved1) != 0xa);
5930         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5931                 data) != 0xc);
5932         BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5933
5934         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5935                 data_in_result) != 0x0);
5936         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5937                 data_out_result) != 0x1);
5938         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5939                 reserved) != 0x2);
5940         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5941                 status) != 0x5);
5942         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5943                 status_qualifier) != 0x6);
5944         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5945                 sense_data_length) != 0x8);
5946         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5947                 response_data_length) != 0xa);
5948         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5949                 data_in_transferred) != 0xc);
5950         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5951                 data_out_transferred) != 0x10);
5952         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5953                 data) != 0x14);
5954         BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5955
5956         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5957                 signature) != 0x0);
5958         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5959                 function_and_status_code) != 0x8);
5960         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5961                 max_admin_iq_elements) != 0x10);
5962         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5963                 max_admin_oq_elements) != 0x11);
5964         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5965                 admin_iq_element_length) != 0x12);
5966         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5967                 admin_oq_element_length) != 0x13);
5968         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5969                 max_reset_timeout) != 0x14);
5970         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5971                 legacy_intx_status) != 0x18);
5972         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5973                 legacy_intx_mask_set) != 0x1c);
5974         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5975                 legacy_intx_mask_clear) != 0x20);
5976         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5977                 device_status) != 0x40);
5978         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5979                 admin_iq_pi_offset) != 0x48);
5980         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5981                 admin_oq_ci_offset) != 0x50);
5982         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5983                 admin_iq_element_array_addr) != 0x58);
5984         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5985                 admin_oq_element_array_addr) != 0x60);
5986         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5987                 admin_iq_ci_addr) != 0x68);
5988         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5989                 admin_oq_pi_addr) != 0x70);
5990         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5991                 admin_iq_num_elements) != 0x78);
5992         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5993                 admin_oq_num_elements) != 0x79);
5994         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5995                 admin_queue_int_msg_num) != 0x7a);
5996         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5997                 device_error) != 0x80);
5998         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5999                 error_details) != 0x88);
6000         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6001                 device_reset) != 0x90);
6002         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6003                 power_action) != 0x94);
6004         BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6005
6006         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6007                 header.iu_type) != 0);
6008         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6009                 header.iu_length) != 2);
6010         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6011                 header.work_area) != 6);
6012         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6013                 request_id) != 8);
6014         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6015                 function_code) != 10);
6016         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6017                 data.report_device_capability.buffer_length) != 44);
6018         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6019                 data.report_device_capability.sg_descriptor) != 48);
6020         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6021                 data.create_operational_iq.queue_id) != 12);
6022         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6023                 data.create_operational_iq.element_array_addr) != 16);
6024         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6025                 data.create_operational_iq.ci_addr) != 24);
6026         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6027                 data.create_operational_iq.num_elements) != 32);
6028         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6029                 data.create_operational_iq.element_length) != 34);
6030         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6031                 data.create_operational_iq.queue_protocol) != 36);
6032         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6033                 data.create_operational_oq.queue_id) != 12);
6034         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6035                 data.create_operational_oq.element_array_addr) != 16);
6036         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6037                 data.create_operational_oq.pi_addr) != 24);
6038         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6039                 data.create_operational_oq.num_elements) != 32);
6040         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6041                 data.create_operational_oq.element_length) != 34);
6042         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6043                 data.create_operational_oq.queue_protocol) != 36);
6044         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6045                 data.create_operational_oq.int_msg_num) != 40);
6046         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6047                 data.create_operational_oq.coalescing_count) != 42);
6048         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6049                 data.create_operational_oq.min_coalescing_time) != 44);
6050         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6051                 data.create_operational_oq.max_coalescing_time) != 48);
6052         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6053                 data.delete_operational_queue.queue_id) != 12);
6054         BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
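	/*
	 * The general admin request is a fixed 64-byte IU whose per-function
	 * data union starts right after the 11-byte common prefix (8-byte
	 * header, 2-byte request_id, 1-byte function_code), hence the
	 * "64 - 11" payload size checked below.
	 */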
6055         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6056                 data.create_operational_iq) != 64 - 11);
6057         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6058                 data.create_operational_oq) != 64 - 11);
6059         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6060                 data.delete_operational_queue) != 64 - 11);
6061
6062         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6063                 header.iu_type) != 0);
6064         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6065                 header.iu_length) != 2);
6066         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6067                 header.work_area) != 6);
6068         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6069                 request_id) != 8);
6070         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6071                 function_code) != 10);
6072         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6073                 status) != 11);
6074         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6075                 data.create_operational_iq.status_descriptor) != 12);
6076         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6077                 data.create_operational_iq.iq_pi_offset) != 16);
6078         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6079                 data.create_operational_oq.status_descriptor) != 12);
6080         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6081                 data.create_operational_oq.oq_ci_offset) != 16);
6082         BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6083
6084         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6085                 header.iu_type) != 0);
6086         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6087                 header.iu_length) != 2);
6088         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6089                 header.response_queue_id) != 4);
6090         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6091                 header.work_area) != 6);
6092         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6093                 request_id) != 8);
6094         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6095                 nexus_id) != 10);
6096         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6097                 buffer_length) != 12);
6098         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6099                 lun_number) != 16);
6100         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6101                 protocol_specific) != 24);
6102         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6103                 error_index) != 27);
6104         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6105                 cdb) != 32);
6106         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6107                 sg_descriptors) != 64);
6108         BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6109                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6110
6111         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6112                 header.iu_type) != 0);
6113         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6114                 header.iu_length) != 2);
6115         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6116                 header.response_queue_id) != 4);
6117         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6118                 header.work_area) != 6);
6119         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6120                 request_id) != 8);
6121         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6122                 nexus_id) != 12);
6123         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6124                 buffer_length) != 16);
6125         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6126                 data_encryption_key_index) != 22);
6127         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6128                 encrypt_tweak_lower) != 24);
6129         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6130                 encrypt_tweak_upper) != 28);
6131         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6132                 cdb) != 32);
6133         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6134                 error_index) != 48);
6135         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6136                 num_sg_descriptors) != 50);
6137         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6138                 cdb_length) != 51);
6139         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6140                 lun_number) != 52);
6141         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6142                 sg_descriptors) != 64);
6143         BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6144                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6145
6146         BUILD_BUG_ON(offsetof(struct pqi_io_response,
6147                 header.iu_type) != 0);
6148         BUILD_BUG_ON(offsetof(struct pqi_io_response,
6149                 header.iu_length) != 2);
6150         BUILD_BUG_ON(offsetof(struct pqi_io_response,
6151                 request_id) != 8);
6152         BUILD_BUG_ON(offsetof(struct pqi_io_response,
6153                 error_index) != 10);
6154
6155         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6156                 header.iu_type) != 0);
6157         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6158                 header.iu_length) != 2);
6159         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6160                 header.response_queue_id) != 4);
6161         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6162                 request_id) != 8);
6163         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6164                 data.report_event_configuration.buffer_length) != 12);
6165         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6166                 data.report_event_configuration.sg_descriptors) != 16);
6167         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6168                 data.set_event_configuration.global_event_oq_id) != 10);
6169         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6170                 data.set_event_configuration.buffer_length) != 12);
6171         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6172                 data.set_event_configuration.sg_descriptors) != 16);
6173
6174         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6175                 max_inbound_iu_length) != 6);
6176         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6177                 max_outbound_iu_length) != 14);
6178         BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6179
6180         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6181                 data_length) != 0);
6182         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6183                 iq_arbitration_priority_support_bitmask) != 8);
6184         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6185                 maximum_aw_a) != 9);
6186         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6187                 maximum_aw_b) != 10);
6188         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6189                 maximum_aw_c) != 11);
6190         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6191                 max_inbound_queues) != 16);
6192         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6193                 max_elements_per_iq) != 18);
6194         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6195                 max_iq_element_length) != 24);
6196         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6197                 min_iq_element_length) != 26);
6198         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6199                 max_outbound_queues) != 30);
6200         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6201                 max_elements_per_oq) != 32);
6202         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6203                 intr_coalescing_time_granularity) != 34);
6204         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6205                 max_oq_element_length) != 36);
6206         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6207                 min_oq_element_length) != 38);
6208         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6209                 iu_layer_descriptors) != 64);
6210         BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6211
6212         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6213                 event_type) != 0);
6214         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6215                 oq_id) != 2);
6216         BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6217
6218         BUILD_BUG_ON(offsetof(struct pqi_event_config,
6219                 num_event_descriptors) != 2);
6220         BUILD_BUG_ON(offsetof(struct pqi_event_config,
6221                 descriptors) != 4);
6222
6223         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6224                 header.iu_type) != 0);
6225         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6226                 header.iu_length) != 2);
6227         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6228                 event_type) != 8);
6229         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6230                 event_id) != 10);
6231         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6232                 additional_event_id) != 12);
6233         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6234                 data) != 16);
6235         BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6236
6237         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6238                 header.iu_type) != 0);
6239         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6240                 header.iu_length) != 2);
6241         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6242                 event_type) != 8);
6243         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6244                 event_id) != 10);
6245         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6246                 additional_event_id) != 12);
6247         BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6248
6249         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6250                 header.iu_type) != 0);
6251         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6252                 header.iu_length) != 2);
6253         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6254                 request_id) != 8);
6255         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6256                 nexus_id) != 10);
6257         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6258                 lun_number) != 16);
6259         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6260                 protocol_specific) != 24);
6261         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6262                 outbound_queue_id_to_manage) != 26);
6263         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6264                 request_id_to_manage) != 28);
6265         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6266                 task_management_function) != 30);
6267         BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6268
6269         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6270                 header.iu_type) != 0);
6271         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6272                 header.iu_length) != 2);
6273         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6274                 request_id) != 8);
6275         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6276                 nexus_id) != 10);
6277         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6278                 additional_response_info) != 12);
6279         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6280                 response_code) != 15);
6281         BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6282
6283         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6284                 configured_logical_drive_count) != 0);
6285         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6286                 configuration_signature) != 1);
6287         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6288                 firmware_version) != 5);
6289         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6290                 extended_logical_unit_count) != 154);
6291         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6292                 firmware_build_number) != 190);
6293         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6294                 controller_mode) != 292);
6295
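	/*
	 * Sanity checks on the queue sizing constants: the admin queue depths
	 * must fit the controller's 8-bit num_elements registers, and every
	 * element length must be a multiple of
	 * PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT.  The 1048560 limit appears to be
	 * the largest length expressible as a 16-bit count of 16-byte units
	 * (65535 * 16).
	 */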
6296         BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6297         BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6298         BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6299                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6300         BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6301                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6302         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6303         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6304                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6305         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6306         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6307                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6308
6309         BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6310 }