drivers/scsi/smartpqi/smartpqi.h (GNU Linux-libre 5.19-rc6-gnu)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *    driver for Microchip PQI-based storage controllers
4  *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
5  *    Copyright (c) 2016-2018 Microsemi Corporation
6  *    Copyright (c) 2016 PMC-Sierra, Inc.
7  *
8  *    Questions/Comments/Bugfixes to storagedev@microchip.com
9  *
10  */
11
12 #include <linux/io-64-nonatomic-lo-hi.h>
13
14 #if !defined(_SMARTPQI_H)
15 #define _SMARTPQI_H
16
17 #include <scsi/scsi_host.h>
18 #include <linux/bsg-lib.h>
19
20 #pragma pack(1)
21
22 #define PQI_DEVICE_SIGNATURE    "PQI DREG"
23
24 /* This structure is defined by the PQI specification. */
25 struct pqi_device_registers {
26         __le64  signature;
27         u8      function_and_status_code;
28         u8      reserved[7];
29         u8      max_admin_iq_elements;
30         u8      max_admin_oq_elements;
31         u8      admin_iq_element_length;        /* in 16-byte units */
32         u8      admin_oq_element_length;        /* in 16-byte units */
33         __le16  max_reset_timeout;              /* in 100-millisecond units */
34         u8      reserved1[2];
35         __le32  legacy_intx_status;
36         __le32  legacy_intx_mask_set;
37         __le32  legacy_intx_mask_clear;
38         u8      reserved2[28];
39         __le32  device_status;
40         u8      reserved3[4];
41         __le64  admin_iq_pi_offset;
42         __le64  admin_oq_ci_offset;
43         __le64  admin_iq_element_array_addr;
44         __le64  admin_oq_element_array_addr;
45         __le64  admin_iq_ci_addr;
46         __le64  admin_oq_pi_addr;
47         u8      admin_iq_num_elements;
48         u8      admin_oq_num_elements;
49         __le16  admin_queue_int_msg_num;
50         u8      reserved4[4];
51         __le32  device_error;
52         u8      reserved5[4];
53         __le64  error_details;
54         __le32  device_reset;
55         __le32  power_action;
56         u8      reserved6[104];
57 };
58
59 /*
60  * controller registers
61  *
62  * These are defined by the Microchip implementation.
63  *
64  * Some registers (those named sis_*) are only used when in
65  * legacy SIS mode before we transition the controller into
66  * PQI mode.  There are a number of other SIS mode registers,
67  * but we don't use them, so only the SIS registers that we
68  * care about are defined here.  The offsets mentioned in the
69  * comments are the offsets from the PCIe BAR 0.
70  */
71 struct pqi_ctrl_registers {
72         u8      reserved[0x20];
73         __le32  sis_host_to_ctrl_doorbell;              /* 20h */
74         u8      reserved1[0x34 - (0x20 + sizeof(__le32))];
75         __le32  sis_interrupt_mask;                     /* 34h */
76         u8      reserved2[0x9c - (0x34 + sizeof(__le32))];
77         __le32  sis_ctrl_to_host_doorbell;              /* 9Ch */
78         u8      reserved3[0xa0 - (0x9c + sizeof(__le32))];
79         __le32  sis_ctrl_to_host_doorbell_clear;        /* A0h */
80         u8      reserved4[0xb0 - (0xa0 + sizeof(__le32))];
81         __le32  sis_driver_scratch;                     /* B0h */
82         __le32  sis_product_identifier;                 /* B4h */
83         u8      reserved5[0xbc - (0xb4 + sizeof(__le32))];
84         __le32  sis_firmware_status;                    /* BCh */
85         u8      reserved6[0xcc - (0xbc + sizeof(__le32))];
86         __le32  sis_ctrl_shutdown_reason_code;          /* CCh */
87         u8      reserved7[0x1000 - (0xcc + sizeof(__le32))];
88         __le32  sis_mailbox[8];                         /* 1000h */
89         u8      reserved8[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
90         /*
91          * The PQI spec states that the PQI registers should be at
92          * offset 0 from the PCIe BAR 0.  However, we can't map
93          * them at offset 0 because that would break compatibility
94          * with the SIS registers.  So we map them at offset 4000h.
95          */
96         struct pqi_device_registers pqi_registers;      /* 4000h */
97 };
98
99 #define PQI_DEVICE_REGISTERS_OFFSET     0x4000
100
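/*
 * Illustrative sketch (hypothetical helper, not part of the driver): with
 * PCIe BAR 0 mapped, the SIS registers live in the low region of the BAR
 * (per struct pqi_ctrl_registers above) and the PQI register block starts
 * at PQI_DEVICE_REGISTERS_OFFSET (4000h).
 */
static inline struct pqi_device_registers __iomem *
pqi_example_pqi_registers(void __iomem *bar0)
{
        return bar0 + PQI_DEVICE_REGISTERS_OFFSET;
}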
101 /* shutdown reasons for taking the controller offline */
102 enum pqi_ctrl_shutdown_reason {
103         PQI_IQ_NOT_DRAINED_TIMEOUT = 1,
104         PQI_LUN_RESET_TIMEOUT = 2,
105         PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT = 3,
106         PQI_NO_HEARTBEAT = 4,
107         PQI_FIRMWARE_KERNEL_NOT_UP = 5,
108         PQI_OFA_RESPONSE_TIMEOUT = 6,
109         PQI_INVALID_REQ_ID = 7,
110         PQI_UNMATCHED_REQ_ID = 8,
111         PQI_IO_PI_OUT_OF_RANGE = 9,
112         PQI_EVENT_PI_OUT_OF_RANGE = 10,
113         PQI_UNEXPECTED_IU_TYPE = 11
114 };
115
116 enum pqi_io_path {
117         RAID_PATH = 0,
118         AIO_PATH = 1
119 };
120
121 enum pqi_irq_mode {
122         IRQ_MODE_NONE,
123         IRQ_MODE_INTX,
124         IRQ_MODE_MSIX
125 };
126
127 struct pqi_sg_descriptor {
128         __le64  address;
129         __le32  length;
130         __le32  flags;
131 };
132
133 /* manifest constants for the flags field of pqi_sg_descriptor */
134 #define CISS_SG_LAST    0x40000000
135 #define CISS_SG_CHAIN   0x80000000
136
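/*
 * Illustrative sketch (hypothetical helper, not part of the driver): fill in
 * one SG descriptor and mark it as the final element of the list with
 * CISS_SG_LAST.  When a request needs more descriptors than fit in the IU,
 * the last embedded descriptor instead points to a chain buffer and carries
 * CISS_SG_CHAIN in its flags.
 */
static inline void pqi_example_set_last_sg(struct pqi_sg_descriptor *sg,
        u64 dma_addr, u32 len)
{
        sg->address = cpu_to_le64(dma_addr);
        sg->length = cpu_to_le32(len);
        sg->flags = cpu_to_le32(CISS_SG_LAST);
}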
137 struct pqi_iu_header {
138         u8      iu_type;
139         u8      reserved;
140         __le16  iu_length;      /* in bytes - does not include the length */
141                                 /* of this header */
142         __le16  response_queue_id;      /* specifies the OQ where the */
143                                         /* response IU is to be delivered */
144         u16     driver_flags;   /* reserved for driver use */
145 };
146
147 /* manifest constants for pqi_iu_header.driver_flags */
148 #define PQI_DRIVER_NONBLOCKABLE_REQUEST         0x1
149
150 /*
151  * According to the PQI spec, the IU header is only the first 4 bytes of our
152  * pqi_iu_header structure.
153  */
154 #define PQI_REQUEST_HEADER_LENGTH       4
155
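/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * iu_length counts everything in the IU that follows the 4-byte spec-defined
 * header, so it is typically set to the full IU size minus
 * PQI_REQUEST_HEADER_LENGTH.
 */
static inline void pqi_example_set_iu_length(struct pqi_iu_header *header,
        size_t total_iu_size)
{
        header->iu_length = cpu_to_le16(total_iu_size - PQI_REQUEST_HEADER_LENGTH);
}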
156 struct pqi_general_admin_request {
157         struct pqi_iu_header header;
158         __le16  request_id;
159         u8      function_code;
160         union {
161                 struct {
162                         u8      reserved[33];
163                         __le32  buffer_length;
164                         struct pqi_sg_descriptor sg_descriptor;
165                 } report_device_capability;
166
167                 struct {
168                         u8      reserved;
169                         __le16  queue_id;
170                         u8      reserved1[2];
171                         __le64  element_array_addr;
172                         __le64  ci_addr;
173                         __le16  num_elements;
174                         __le16  element_length;
175                         u8      queue_protocol;
176                         u8      reserved2[23];
177                         __le32  vendor_specific;
178                 } create_operational_iq;
179
180                 struct {
181                         u8      reserved;
182                         __le16  queue_id;
183                         u8      reserved1[2];
184                         __le64  element_array_addr;
185                         __le64  pi_addr;
186                         __le16  num_elements;
187                         __le16  element_length;
188                         u8      queue_protocol;
189                         u8      reserved2[3];
190                         __le16  int_msg_num;
191                         __le16  coalescing_count;
192                         __le32  min_coalescing_time;
193                         __le32  max_coalescing_time;
194                         u8      reserved3[8];
195                         __le32  vendor_specific;
196                 } create_operational_oq;
197
198                 struct {
199                         u8      reserved;
200                         __le16  queue_id;
201                         u8      reserved1[50];
202                 } delete_operational_queue;
203
204                 struct {
205                         u8      reserved;
206                         __le16  queue_id;
207                         u8      reserved1[46];
208                         __le32  vendor_specific;
209                 } change_operational_iq_properties;
210
211         } data;
212 };
213
214 struct pqi_general_admin_response {
215         struct pqi_iu_header header;
216         __le16  request_id;
217         u8      function_code;
218         u8      status;
219         union {
220                 struct {
221                         u8      status_descriptor[4];
222                         __le64  iq_pi_offset;
223                         u8      reserved[40];
224                 } create_operational_iq;
225
226                 struct {
227                         u8      status_descriptor[4];
228                         __le64  oq_ci_offset;
229                         u8      reserved[40];
230                 } create_operational_oq;
231         } data;
232 };
233
234 struct pqi_iu_layer_descriptor {
235         u8      inbound_spanning_supported : 1;
236         u8      reserved : 7;
237         u8      reserved1[5];
238         __le16  max_inbound_iu_length;
239         u8      outbound_spanning_supported : 1;
240         u8      reserved2 : 7;
241         u8      reserved3[5];
242         __le16  max_outbound_iu_length;
243 };
244
245 struct pqi_device_capability {
246         __le16  data_length;
247         u8      reserved[6];
248         u8      iq_arbitration_priority_support_bitmask;
249         u8      maximum_aw_a;
250         u8      maximum_aw_b;
251         u8      maximum_aw_c;
252         u8      max_arbitration_burst : 3;
253         u8      reserved1 : 4;
254         u8      iqa : 1;
255         u8      reserved2[2];
256         u8      iq_freeze : 1;
257         u8      reserved3 : 7;
258         __le16  max_inbound_queues;
259         __le16  max_elements_per_iq;
260         u8      reserved4[4];
261         __le16  max_iq_element_length;
262         __le16  min_iq_element_length;
263         u8      reserved5[2];
264         __le16  max_outbound_queues;
265         __le16  max_elements_per_oq;
266         __le16  intr_coalescing_time_granularity;
267         __le16  max_oq_element_length;
268         __le16  min_oq_element_length;
269         u8      reserved6[24];
270         struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
271 };
272
273 #define PQI_MAX_EMBEDDED_SG_DESCRIPTORS         4
274 #define PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS     3
275
276 struct pqi_raid_path_request {
277         struct pqi_iu_header header;
278         __le16  request_id;
279         __le16  nexus_id;
280         __le32  buffer_length;
281         u8      lun_number[8];
282         __le16  protocol_specific;
283         u8      data_direction : 2;
284         u8      partial : 1;
285         u8      reserved1 : 4;
286         u8      fence : 1;
287         __le16  error_index;
288         u8      reserved2;
289         u8      task_attribute : 3;
290         u8      command_priority : 4;
291         u8      reserved3 : 1;
292         u8      reserved4 : 2;
293         u8      additional_cdb_bytes_usage : 3;
294         u8      reserved5 : 3;
295         u8      cdb[16];
296         u8      reserved6[12];
297         __le32  timeout;
298         struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
299 };
300
301 struct pqi_aio_path_request {
302         struct pqi_iu_header header;
303         __le16  request_id;
304         u8      reserved1[2];
305         __le32  nexus_id;
306         __le32  buffer_length;
307         u8      data_direction : 2;
308         u8      partial : 1;
309         u8      memory_type : 1;
310         u8      fence : 1;
311         u8      encryption_enable : 1;
312         u8      reserved2 : 2;
313         u8      task_attribute : 3;
314         u8      command_priority : 4;
315         u8      reserved3 : 1;
316         __le16  data_encryption_key_index;
317         __le32  encrypt_tweak_lower;
318         __le32  encrypt_tweak_upper;
319         u8      cdb[16];
320         __le16  error_index;
321         u8      num_sg_descriptors;
322         u8      cdb_length;
323         u8      lun_number[8];
324         u8      reserved4[4];
325         struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
326 };
327
328 #define PQI_RAID1_NVME_XFER_LIMIT       (32 * 1024)     /* 32 KiB */
329
330 struct pqi_aio_r1_path_request {
331         struct pqi_iu_header header;
332         __le16  request_id;
333         __le16  volume_id;      /* ID of the RAID volume */
334         __le32  it_nexus_1;     /* IT nexus of the 1st drive in the RAID volume */
335         __le32  it_nexus_2;     /* IT nexus of the 2nd drive in the RAID volume */
336         __le32  it_nexus_3;     /* IT nexus of the 3rd drive in the RAID volume */
337         __le32  data_length;    /* total bytes to read/write */
338         u8      data_direction : 2;
339         u8      partial : 1;
340         u8      memory_type : 1;
341         u8      fence : 1;
342         u8      encryption_enable : 1;
343         u8      reserved : 2;
344         u8      task_attribute : 3;
345         u8      command_priority : 4;
346         u8      reserved2 : 1;
347         __le16  data_encryption_key_index;
348         u8      cdb[16];
349         __le16  error_index;
350         u8      num_sg_descriptors;
351         u8      cdb_length;
352         u8      num_drives;     /* number of drives in the RAID volume (2 or 3) */
353         u8      reserved3[3];
354         __le32  encrypt_tweak_lower;
355         __le32  encrypt_tweak_upper;
356         struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
357 };
358
359 #define PQI_DEFAULT_MAX_WRITE_RAID_5_6                  (8 * 1024U)
360 #define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA     (~0U)
361 #define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME         (32 * 1024U)
362
363 struct pqi_aio_r56_path_request {
364         struct pqi_iu_header header;
365         __le16  request_id;
366         __le16  volume_id;              /* ID of the RAID volume */
367         __le32  data_it_nexus;          /* IT nexus for the data drive */
368         __le32  p_parity_it_nexus;      /* IT nexus for the P parity drive */
369         __le32  q_parity_it_nexus;      /* IT nexus for the Q parity drive */
370         __le32  data_length;            /* total bytes to read/write */
371         u8      data_direction : 2;
372         u8      partial : 1;
373         u8      mem_type : 1;           /* 0 = PCIe, 1 = DDR */
374         u8      fence : 1;
375         u8      encryption_enable : 1;
376         u8      reserved : 2;
377         u8      task_attribute : 3;
378         u8      command_priority : 4;
379         u8      reserved1 : 1;
380         __le16  data_encryption_key_index;
381         u8      cdb[16];
382         __le16  error_index;
383         u8      num_sg_descriptors;
384         u8      cdb_length;
385         u8      xor_multiplier;
386         u8      reserved2[3];
387         __le32  encrypt_tweak_lower;
388         __le32  encrypt_tweak_upper;
389         __le64  row;                    /* row = logical LBA/blocks per row */
390         u8      reserved3[8];
391         struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS];
392 };
393
394 struct pqi_io_response {
395         struct pqi_iu_header header;
396         __le16  request_id;
397         __le16  error_index;
398         u8      reserved2[4];
399 };
400
401 struct pqi_general_management_request {
402         struct pqi_iu_header header;
403         __le16  request_id;
404         union {
405                 struct {
406                         u8      reserved[2];
407                         __le32  buffer_length;
408                         struct pqi_sg_descriptor sg_descriptors[3];
409                 } report_event_configuration;
410
411                 struct {
412                         __le16  global_event_oq_id;
413                         __le32  buffer_length;
414                         struct pqi_sg_descriptor sg_descriptors[3];
415                 } set_event_configuration;
416         } data;
417 };
418
419 struct pqi_event_descriptor {
420         u8      event_type;
421         u8      reserved;
422         __le16  oq_id;
423 };
424
425 struct pqi_event_config {
426         u8      reserved[2];
427         u8      num_event_descriptors;
428         u8      reserved1;
429         struct pqi_event_descriptor descriptors[];
430 };
431
432 #define PQI_MAX_EVENT_DESCRIPTORS       255
433
434 #define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0
435 #define PQI_EVENT_OFA_QUIESCE           0x1
436 #define PQI_EVENT_OFA_CANCELED          0x2
437
438 struct pqi_event_response {
439         struct pqi_iu_header header;
440         u8      event_type;
441         u8      reserved2 : 7;
442         u8      request_acknowledge : 1;
443         __le16  event_id;
444         __le32  additional_event_id;
445         union {
446                 struct {
447                         __le32  bytes_requested;
448                         u8      reserved[12];
449                 } ofa_memory_allocation;
450
451                 struct {
452                         __le16  reason;         /* reason for cancellation */
453                         u8      reserved[14];
454                 } ofa_cancelled;
455         } data;
456 };
457
458 struct pqi_event_acknowledge_request {
459         struct pqi_iu_header header;
460         u8      event_type;
461         u8      reserved2;
462         __le16  event_id;
463         __le32  additional_event_id;
464 };
465
466 struct pqi_task_management_request {
467         struct pqi_iu_header header;
468         __le16  request_id;
469         __le16  nexus_id;
470         u8      reserved[2];
471         __le16  timeout;
472         u8      lun_number[8];
473         __le16  protocol_specific;
474         __le16  outbound_queue_id_to_manage;
475         __le16  request_id_to_manage;
476         u8      task_management_function;
477         u8      reserved2 : 7;
478         u8      fence : 1;
479 };
480
481 #define SOP_TASK_MANAGEMENT_LUN_RESET   0x8
482
483 struct pqi_task_management_response {
484         struct pqi_iu_header header;
485         __le16  request_id;
486         __le16  nexus_id;
487         u8      additional_response_info[3];
488         u8      response_code;
489 };
490
491 struct pqi_vendor_general_request {
492         struct pqi_iu_header header;
493         __le16  request_id;
494         __le16  function_code;
495         union {
496                 struct {
497                         __le16  first_section;
498                         __le16  last_section;
499                         u8      reserved[48];
500                 } config_table_update;
501
502                 struct {
503                         __le64  buffer_address;
504                         __le32  buffer_length;
505                         u8      reserved[40];
506                 } ofa_memory_allocation;
507         } data;
508 };
509
510 struct pqi_vendor_general_response {
511         struct pqi_iu_header header;
512         __le16  request_id;
513         __le16  function_code;
514         __le16  status;
515         u8      reserved[2];
516 };
517
518 #define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE  0
519 #define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE   1
520
521 #define PQI_OFA_VERSION                 1
522 #define PQI_OFA_SIGNATURE               "OFA_QRM"
523 #define PQI_OFA_MAX_SG_DESCRIPTORS      64
524
525 struct pqi_ofa_memory {
526         __le64  signature;      /* "OFA_QRM" */
527         __le16  version;        /* version of this struct (1 = 1st version) */
528         u8      reserved[62];
529         __le32  bytes_allocated;        /* total allocated memory in bytes */
530         __le16  num_memory_descriptors;
531         u8      reserved1[2];
532         struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
533 };
534
535 struct pqi_aio_error_info {
536         u8      status;
537         u8      service_response;
538         u8      data_present;
539         u8      reserved;
540         __le32  residual_count;
541         __le16  data_length;
542         __le16  reserved1;
543         u8      data[256];
544 };
545
546 struct pqi_raid_error_info {
547         u8      data_in_result;
548         u8      data_out_result;
549         u8      reserved[3];
550         u8      status;
551         __le16  status_qualifier;
552         __le16  sense_data_length;
553         __le16  response_data_length;
554         __le32  data_in_transferred;
555         __le32  data_out_transferred;
556         u8      data[256];
557 };
558
559 #define PQI_REQUEST_IU_TASK_MANAGEMENT                  0x13
560 #define PQI_REQUEST_IU_RAID_PATH_IO                     0x14
561 #define PQI_REQUEST_IU_AIO_PATH_IO                      0x15
562 #define PQI_REQUEST_IU_AIO_PATH_RAID5_IO                0x18
563 #define PQI_REQUEST_IU_AIO_PATH_RAID6_IO                0x19
564 #define PQI_REQUEST_IU_AIO_PATH_RAID1_IO                0x1A
565 #define PQI_REQUEST_IU_GENERAL_ADMIN                    0x60
566 #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG       0x72
567 #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG          0x73
568 #define PQI_REQUEST_IU_VENDOR_GENERAL                   0x75
569 #define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT         0xf6
570
571 #define PQI_RESPONSE_IU_GENERAL_MANAGEMENT              0x81
572 #define PQI_RESPONSE_IU_TASK_MANAGEMENT                 0x93
573 #define PQI_RESPONSE_IU_GENERAL_ADMIN                   0xe0
574 #define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS            0xf0
575 #define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS             0xf1
576 #define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR              0xf2
577 #define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR               0xf3
578 #define PQI_RESPONSE_IU_AIO_PATH_DISABLED               0xf4
579 #define PQI_RESPONSE_IU_VENDOR_EVENT                    0xf5
580 #define PQI_RESPONSE_IU_VENDOR_GENERAL                  0xf7
581
582 #define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY     0x0
583 #define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ                    0x10
584 #define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ                    0x11
585 #define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ                    0x12
586 #define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ                    0x13
587 #define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY           0x14
588
589 #define PQI_GENERAL_ADMIN_STATUS_SUCCESS        0x0
590
591 #define PQI_IQ_PROPERTY_IS_AIO_QUEUE    0x1
592
593 #define PQI_GENERAL_ADMIN_IU_LENGTH             0x3c
594 #define PQI_PROTOCOL_SOP                        0x0
595
596 #define PQI_DATA_IN_OUT_GOOD                                    0x0
597 #define PQI_DATA_IN_OUT_UNDERFLOW                               0x1
598 #define PQI_DATA_IN_OUT_BUFFER_ERROR                            0x40
599 #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW                         0x41
600 #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA         0x42
601 #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE                  0x43
602 #define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR                       0x60
603 #define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT                 0x61
604 #define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED           0x62
605 #define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED       0x63
606 #define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED                  0x64
607 #define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST                0x65
608 #define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION                      0x66
609 #define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED                 0x67
610 #define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ               0x6F
611 #define PQI_DATA_IN_OUT_ERROR                                   0xf0
612 #define PQI_DATA_IN_OUT_PROTOCOL_ERROR                          0xf1
613 #define PQI_DATA_IN_OUT_HARDWARE_ERROR                          0xf2
614 #define PQI_DATA_IN_OUT_UNSOLICITED_ABORT                       0xf3
615 #define PQI_DATA_IN_OUT_ABORTED                                 0xf4
616 #define PQI_DATA_IN_OUT_TIMEOUT                                 0xf5
617
618 #define CISS_CMD_STATUS_SUCCESS                 0x0
619 #define CISS_CMD_STATUS_TARGET_STATUS           0x1
620 #define CISS_CMD_STATUS_DATA_UNDERRUN           0x2
621 #define CISS_CMD_STATUS_DATA_OVERRUN            0x3
622 #define CISS_CMD_STATUS_INVALID                 0x4
623 #define CISS_CMD_STATUS_PROTOCOL_ERROR          0x5
624 #define CISS_CMD_STATUS_HARDWARE_ERROR          0x6
625 #define CISS_CMD_STATUS_CONNECTION_LOST         0x7
626 #define CISS_CMD_STATUS_ABORTED                 0x8
627 #define CISS_CMD_STATUS_ABORT_FAILED            0x9
628 #define CISS_CMD_STATUS_UNSOLICITED_ABORT       0xa
629 #define CISS_CMD_STATUS_TIMEOUT                 0xb
630 #define CISS_CMD_STATUS_UNABORTABLE             0xc
631 #define CISS_CMD_STATUS_TMF                     0xd
632 #define CISS_CMD_STATUS_AIO_DISABLED            0xe
633
634 #define PQI_CMD_STATUS_ABORTED  CISS_CMD_STATUS_ABORTED
635
636 #define PQI_NUM_EVENT_QUEUE_ELEMENTS    32
637 #define PQI_EVENT_OQ_ELEMENT_LENGTH     sizeof(struct pqi_event_response)
638
639 #define PQI_EVENT_TYPE_HOTPLUG                  0x1
640 #define PQI_EVENT_TYPE_HARDWARE                 0x2
641 #define PQI_EVENT_TYPE_PHYSICAL_DEVICE          0x4
642 #define PQI_EVENT_TYPE_LOGICAL_DEVICE           0x5
643 #define PQI_EVENT_TYPE_OFA                      0xfb
644 #define PQI_EVENT_TYPE_AIO_STATE_CHANGE         0xfd
645 #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE        0xfe
646
647 #pragma pack()
648
649 #define PQI_ERROR_BUFFER_ELEMENT_LENGTH         \
650         sizeof(struct pqi_raid_error_info)
651
652 /* these values are based on our implementation */
653 #define PQI_ADMIN_IQ_NUM_ELEMENTS               8
654 #define PQI_ADMIN_OQ_NUM_ELEMENTS               20
655 #define PQI_ADMIN_IQ_ELEMENT_LENGTH             64
656 #define PQI_ADMIN_OQ_ELEMENT_LENGTH             64
657
658 #define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH       128
659 #define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH       16
660
661 #define PQI_MIN_MSIX_VECTORS            1
662 #define PQI_MAX_MSIX_VECTORS            64
663
664 /* these values are defined by the PQI spec */
665 #define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE        255
666 #define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE  65535
667
668 #define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT       64
669 #define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT      16
670 #define PQI_ADMIN_INDEX_ALIGNMENT               64
671 #define PQI_OPERATIONAL_INDEX_ALIGNMENT         4
672
673 #define PQI_MIN_OPERATIONAL_QUEUE_ID            1
674 #define PQI_MAX_OPERATIONAL_QUEUE_ID            65535
675
676 #define PQI_AIO_SERV_RESPONSE_COMPLETE          0
677 #define PQI_AIO_SERV_RESPONSE_FAILURE           1
678 #define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE      2
679 #define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED     3
680 #define PQI_AIO_SERV_RESPONSE_TMF_REJECTED      4
681 #define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5
682
683 #define PQI_AIO_STATUS_IO_ERROR                 0x1
684 #define PQI_AIO_STATUS_IO_ABORTED               0x2
685 #define PQI_AIO_STATUS_NO_PATH_TO_DEVICE        0x3
686 #define PQI_AIO_STATUS_INVALID_DEVICE           0x4
687 #define PQI_AIO_STATUS_AIO_PATH_DISABLED        0xe
688 #define PQI_AIO_STATUS_UNDERRUN                 0x51
689 #define PQI_AIO_STATUS_OVERRUN                  0x75
690
691 typedef u32 pqi_index_t;
692
693 /* SOP data direction flags */
694 #define SOP_NO_DIRECTION_FLAG   0
695 #define SOP_WRITE_FLAG          1       /* host writes data to Data-Out */
696                                         /* buffer */
697 #define SOP_READ_FLAG           2       /* host receives data from Data-In */
698                                         /* buffer */
699 #define SOP_BIDIRECTIONAL       3       /* data is transferred from the */
700                                         /* Data-Out buffer and data is */
701                                         /* transferred to the Data-In buffer */
702
703 #define SOP_TASK_ATTRIBUTE_SIMPLE               0
704 #define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE        1
705 #define SOP_TASK_ATTRIBUTE_ORDERED              2
706 #define SOP_TASK_ATTRIBUTE_ACA                  4
707
708 #define SOP_TMF_COMPLETE                0x0
709 #define SOP_TMF_REJECTED                0x4
710 #define SOP_TMF_FUNCTION_SUCCEEDED      0x8
711
712 /* additional CDB bytes usage field codes */
713 #define SOP_ADDITIONAL_CDB_BYTES_0      0       /* 16-byte CDB */
714 #define SOP_ADDITIONAL_CDB_BYTES_4      1       /* 20-byte CDB */
715 #define SOP_ADDITIONAL_CDB_BYTES_8      2       /* 24-byte CDB */
716 #define SOP_ADDITIONAL_CDB_BYTES_12     3       /* 28-byte CDB */
717 #define SOP_ADDITIONAL_CDB_BYTES_16     4       /* 32-byte CDB */
718
719 /*
720  * The purpose of this structure is to obtain proper alignment of objects in
721  * an admin queue pair.
722  */
723 struct pqi_admin_queues_aligned {
724         __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
725                 u8      iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
726                                         [PQI_ADMIN_IQ_NUM_ELEMENTS];
727         __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
728                 u8      oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
729                                         [PQI_ADMIN_OQ_NUM_ELEMENTS];
730         __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
731         __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
732 };
733
734 struct pqi_admin_queues {
735         void            *iq_element_array;
736         void            *oq_element_array;
737         pqi_index_t __iomem *iq_ci;
738         pqi_index_t __iomem *oq_pi;
739         dma_addr_t      iq_element_array_bus_addr;
740         dma_addr_t      oq_element_array_bus_addr;
741         dma_addr_t      iq_ci_bus_addr;
742         dma_addr_t      oq_pi_bus_addr;
743         __le32 __iomem  *iq_pi;
744         pqi_index_t     iq_pi_copy;
745         __le32 __iomem  *oq_ci;
746         pqi_index_t     oq_ci_copy;
747         struct task_struct *task;
748         u16             int_msg_num;
749 };
750
751 struct pqi_queue_group {
752         struct pqi_ctrl_info *ctrl_info;        /* backpointer */
753         u16             iq_id[2];
754         u16             oq_id;
755         u16             int_msg_num;
756         void            *iq_element_array[2];
757         void            *oq_element_array;
758         dma_addr_t      iq_element_array_bus_addr[2];
759         dma_addr_t      oq_element_array_bus_addr;
760         __le32 __iomem  *iq_pi[2];
761         pqi_index_t     iq_pi_copy[2];
762         pqi_index_t __iomem *iq_ci[2];
763         pqi_index_t __iomem *oq_pi;
764         dma_addr_t      iq_ci_bus_addr[2];
765         dma_addr_t      oq_pi_bus_addr;
766         __le32 __iomem  *oq_ci;
767         pqi_index_t     oq_ci_copy;
768         spinlock_t      submit_lock[2]; /* protect submission queue */
769         struct list_head request_list[2];
770 };
771
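/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * responses are consumed from an outbound queue by comparing the driver's
 * cached consumer index (oq_ci_copy) against the producer index posted by
 * the controller, then writing the updated consumer index back through
 * oq_ci.  This returns how many unconsumed response elements are waiting.
 */
static inline unsigned int pqi_example_oq_responses_pending(
        struct pqi_queue_group *queue_group, u16 num_elements_per_oq)
{
        pqi_index_t oq_pi = readl(queue_group->oq_pi);
        pqi_index_t oq_ci = queue_group->oq_ci_copy;

        return oq_pi >= oq_ci ? oq_pi - oq_ci :
                num_elements_per_oq - oq_ci + oq_pi;
}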
772 struct pqi_event_queue {
773         u16             oq_id;
774         u16             int_msg_num;
775         void            *oq_element_array;
776         pqi_index_t __iomem *oq_pi;
777         dma_addr_t      oq_element_array_bus_addr;
778         dma_addr_t      oq_pi_bus_addr;
779         __le32 __iomem  *oq_ci;
780         pqi_index_t     oq_ci_copy;
781 };
782
783 #define PQI_DEFAULT_QUEUE_GROUP         0
784 #define PQI_MAX_QUEUE_GROUPS            PQI_MAX_MSIX_VECTORS
785
786 struct pqi_encryption_info {
787         u16     data_encryption_key_index;
788         u32     encrypt_tweak_lower;
789         u32     encrypt_tweak_upper;
790 };
791
792 #pragma pack(1)
793
794 #define PQI_CONFIG_TABLE_SIGNATURE      "CFGTABLE"
795 #define PQI_CONFIG_TABLE_MAX_LENGTH     ((u16)~0)
796
797 /* configuration table section IDs */
798 #define PQI_CONFIG_TABLE_ALL_SECTIONS                   (-1)
799 #define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO           0
800 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES      1
801 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA        2
802 #define PQI_CONFIG_TABLE_SECTION_DEBUG                  3
803 #define PQI_CONFIG_TABLE_SECTION_HEARTBEAT              4
804 #define PQI_CONFIG_TABLE_SECTION_SOFT_RESET             5
805
806 struct pqi_config_table {
807         u8      signature[8];           /* "CFGTABLE" */
808         __le32  first_section_offset;   /* offset in bytes from the base */
809                                         /* address of this table to the */
810                                         /* first section */
811 };
812
813 struct pqi_config_table_section_header {
814         __le16  section_id;             /* as defined by the */
815                                         /* PQI_CONFIG_TABLE_SECTION_* */
816                                         /* manifest constants above */
817         __le16  next_section_offset;    /* offset in bytes from base */
818                                         /* address of the table of the */
819                                         /* next section or 0 if last entry */
820 };
821
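/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * configuration table is a chain of sections linked by byte offsets from
 * the start of the table.  A walk begins at first_section_offset and
 * follows next_section_offset until it reads 0.
 */
static inline struct pqi_config_table_section_header *
pqi_example_next_config_section(struct pqi_config_table *table,
        struct pqi_config_table_section_header *section)
{
        u16 offset = le16_to_cpu(section->next_section_offset);

        return offset ? (void *)table + offset : NULL;
}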
822 struct pqi_config_table_general_info {
823         struct pqi_config_table_section_header header;
824         __le32  section_length;         /* size of this section in bytes */
825                                         /* including the section header */
826         __le32  max_outstanding_requests;       /* max. outstanding */
827                                                 /* commands supported by */
828                                                 /* the controller */
829         __le32  max_sg_size;            /* max. transfer size of a single */
830                                         /* command */
831         __le32  max_sg_per_request;     /* max. number of scatter-gather */
832                                         /* entries supported in a single */
833                                         /* command */
834 };
835
836 struct pqi_config_table_firmware_features {
837         struct pqi_config_table_section_header header;
838         __le16  num_elements;
839         u8      features_supported[];
840 /*      u8      features_requested_by_host[]; */
841 /*      u8      features_enabled[]; */
842 /* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */
843 /*      __le16  firmware_max_known_feature; */
844 /*      __le16  host_max_known_feature; */
845 };
846
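/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * three variable-length arrays that follow num_elements are bitmaps, each
 * num_elements bytes long, so firmware feature N lives in byte N / 8, bit
 * N % 8 of features_supported (with the requested-by-host and enabled
 * bitmaps following at multiples of num_elements).
 */
static inline bool pqi_example_firmware_feature_supported(
        struct pqi_config_table_firmware_features *features,
        unsigned int bit_position)
{
        unsigned int byte_index = bit_position / 8;

        if (byte_index >= le16_to_cpu(features->num_elements))
                return false;

        return features->features_supported[byte_index] &
                (1 << (bit_position % 8));
}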
847 #define PQI_FIRMWARE_FEATURE_OFA                                0
848 #define PQI_FIRMWARE_FEATURE_SMP                                1
849 #define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE                  2
850 #define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS                 3
851 #define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS                 4
852 #define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS                 5
853 #define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS                 6
854 #define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS                7
855 #define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS                8
856 #define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS                9
857 #define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS                10
858 #define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE               11
859 #define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN                    12
860 #define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT                    13
861 #define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT                     14
862 #define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME      15
863 #define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN     16
864 #define PQI_FIRMWARE_FEATURE_FW_TRIAGE                          17
865 #define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5            18
866 #define PQI_FIRMWARE_FEATURE_MAXIMUM                            18
867
868 struct pqi_config_table_debug {
869         struct pqi_config_table_section_header header;
870         __le32  scratchpad;
871 };
872
873 struct pqi_config_table_heartbeat {
874         struct pqi_config_table_section_header header;
875         __le32  heartbeat_counter;
876 };
877
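/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * controller increments heartbeat_counter while it is healthy, so the
 * driver only has to sample the counter periodically and compare it with
 * the previous sample; a stalled counter maps to the PQI_NO_HEARTBEAT
 * shutdown reason defined above.
 */
static inline bool pqi_example_heartbeat_advanced(
        __le32 __iomem *heartbeat_counter, u32 previous_count)
{
        return readl(heartbeat_counter) != previous_count;
}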
878 struct pqi_config_table_soft_reset {
879         struct pqi_config_table_section_header header;
880         u8 soft_reset_status;
881 };
882
883 #define PQI_SOFT_RESET_INITIATE         0x1
884 #define PQI_SOFT_RESET_ABORT            0x2
885
886 enum pqi_soft_reset_status {
887         RESET_INITIATE_FIRMWARE,
888         RESET_INITIATE_DRIVER,
889         RESET_ABORT,
890         RESET_NORESPONSE,
891         RESET_TIMEDOUT
892 };
893
894 union pqi_reset_register {
895         struct {
896                 u32     reset_type : 3;
897                 u32     reserved : 2;
898                 u32     reset_action : 3;
899                 u32     hold_in_pd1 : 1;
900                 u32     reserved2 : 23;
901         } bits;
902         u32     all_bits;
903 };
904
905 #define PQI_RESET_ACTION_RESET          0x1
906
907 #define PQI_RESET_TYPE_NO_RESET         0x0
908 #define PQI_RESET_TYPE_SOFT_RESET       0x1
909 #define PQI_RESET_TYPE_FIRM_RESET       0x2
910 #define PQI_RESET_TYPE_HARD_RESET       0x3
911
912 #define PQI_RESET_ACTION_COMPLETED      0x2
913
914 #define PQI_RESET_POLL_INTERVAL_MSECS   100
915
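/*
 * Illustrative sketch (hypothetical helper, not part of the driver): a
 * reset request is composed from the bit fields of union pqi_reset_register
 * and the resulting all_bits value is written to the controller's reset
 * register; completion is indicated when reset_action reads back as
 * PQI_RESET_ACTION_COMPLETED.
 */
static inline u32 pqi_example_soft_reset_value(void)
{
        union pqi_reset_register reset_reg;

        reset_reg.all_bits = 0;
        reset_reg.bits.reset_type = PQI_RESET_TYPE_SOFT_RESET;
        reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

        return reset_reg.all_bits;
}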
916 #define PQI_MAX_OUTSTANDING_REQUESTS            ((u32)~0)
917 #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP      32
918 #define PQI_MAX_TRANSFER_SIZE                   (1024U * 1024U)
919 #define PQI_MAX_TRANSFER_SIZE_KDUMP             (512 * 1024U)
920
921 #define RAID_MAP_MAX_ENTRIES                    1024
922 #define RAID_MAP_MAX_DATA_DISKS_PER_ROW         128
923
924 #define PQI_PHYSICAL_DEVICE_BUS         0
925 #define PQI_RAID_VOLUME_BUS             1
926 #define PQI_HBA_BUS                     2
927 #define PQI_EXTERNAL_RAID_VOLUME_BUS    3
928 #define PQI_MAX_BUS                     PQI_EXTERNAL_RAID_VOLUME_BUS
929 #define PQI_VSEP_CISS_BTL               379
930
931 struct report_lun_header {
932         __be32  list_length;
933         u8      flags;
934         u8      reserved[3];
935 };
936
937 /* for flags field of struct report_lun_header */
938 #define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID      (1 << 0)
939 #define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH        (1 << 5)
940 #define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX     (1 << 6)
941
942 #define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2         0x2
943 #define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4         0x4
944 #define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK      0xf
945
946 struct report_log_lun {
947         u8      lunid[8];
948         u8      volume_id[16];
949 };
950
951 struct report_log_lun_list {
952         struct report_lun_header header;
953         struct report_log_lun lun_entries[1];
954 };
955
956 struct report_phys_lun_8byte_wwid {
957         u8      lunid[8];
958         __be64  wwid;
959         u8      device_type;
960         u8      device_flags;
961         u8      lun_count;      /* number of LUNs in a multi-LUN device */
962         u8      redundant_paths;
963         u32     aio_handle;
964 };
965
966 struct report_phys_lun_16byte_wwid {
967         u8      lunid[8];
968         u8      wwid[16];
969         u8      device_type;
970         u8      device_flags;
971         u8      lun_count;      /* number of LUNs in a multi-LUN device */
972         u8      redundant_paths;
973         u32     aio_handle;
974 };
975
976 /* for device_flags field of struct report_phys_lun_extended_entry */
977 #define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED   0x8
978
979 struct report_phys_lun_8byte_wwid_list {
980         struct report_lun_header header;
981         struct report_phys_lun_8byte_wwid lun_entries[1];
982 };
983
984 struct report_phys_lun_16byte_wwid_list {
985         struct report_lun_header header;
986         struct report_phys_lun_16byte_wwid lun_entries[1];
987 };
988
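/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * list_length is a big-endian count of the bytes that follow the report
 * LUN header, so the number of returned entries is that byte count divided
 * by the size of one entry.
 */
static inline unsigned int pqi_example_num_phys_luns(
        struct report_phys_lun_16byte_wwid_list *list)
{
        return be32_to_cpu(list->header.list_length) /
                sizeof(list->lun_entries[0]);
}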
989 struct raid_map_disk_data {
990         u32     aio_handle;
991         u8      xor_mult[2];
992         u8      reserved[2];
993 };
994
995 /* for flags field of RAID map */
996 #define RAID_MAP_ENCRYPTION_ENABLED     0x1
997
998 struct raid_map {
999         __le32  structure_size;         /* size of entire structure in bytes */
1000         __le32  volume_blk_size;        /* bytes / block in the volume */
1001         __le64  volume_blk_cnt;         /* logical blocks on the volume */
1002         u8      phys_blk_shift;         /* shift factor to convert between */
1003                                         /* units of logical blocks and */
1004                                         /* physical disk blocks */
1005         u8      parity_rotation_shift;  /* shift factor to convert between */
1006                                         /* units of logical stripes and */
1007                                         /* physical stripes */
1008         __le16  strip_size;             /* blocks used on each disk / stripe */
1009         __le64  disk_starting_blk;      /* first disk block used in volume */
1010         __le64  disk_blk_cnt;           /* disk blocks used by volume / disk */
1011         __le16  data_disks_per_row;     /* data disk entries / row in the map */
1012         __le16  metadata_disks_per_row; /* mirror/parity disk entries / row */
1013                                         /* in the map */
1014         __le16  row_cnt;                /* rows in each layout map */
1015         __le16  layout_map_count;       /* layout maps (1 map per */
1016                                         /* mirror parity group) */
1017         __le16  flags;
1018         __le16  data_encryption_key_index;
1019         u8      reserved[16];
1020         struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
1021 };
1022
1023 #pragma pack()
1024
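/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * basic RAID bypass geometry: a row spans strip_size blocks on each of
 * data_disks_per_row disks, so a logical block number decomposes into a
 * row, an offset within that row, and the data disk (column) that holds
 * it.  (A real implementation must use do_div() for the 64-bit divisions
 * on 32-bit kernels.)
 */
static inline void pqi_example_map_block(struct raid_map *raid_map,
        u64 first_block, u64 *row, u32 *row_offset, u32 *column)
{
        u32 blocks_per_row = le16_to_cpu(raid_map->data_disks_per_row) *
                le16_to_cpu(raid_map->strip_size);

        *row = first_block / blocks_per_row;
        *row_offset = first_block - (*row * blocks_per_row);
        *column = *row_offset / le16_to_cpu(raid_map->strip_size);
}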
1025 struct pqi_scsi_dev_raid_map_data {
1026         bool    is_write;
1027         u8      raid_level;
1028         u32     map_index;
1029         u64     first_block;
1030         u64     last_block;
1031         u32     data_length;
1032         u32     block_cnt;
1033         u32     blocks_per_row;
1034         u64     first_row;
1035         u64     last_row;
1036         u32     first_row_offset;
1037         u32     last_row_offset;
1038         u32     first_column;
1039         u32     last_column;
1040         u64     r5or6_first_row;
1041         u64     r5or6_last_row;
1042         u32     r5or6_first_row_offset;
1043         u32     r5or6_last_row_offset;
1044         u32     r5or6_first_column;
1045         u32     r5or6_last_column;
1046         u16     data_disks_per_row;
1047         u32     total_disks_per_row;
1048         u16     layout_map_count;
1049         u32     stripesize;
1050         u16     strip_size;
1051         u32     first_group;
1052         u32     last_group;
1053         u32     map_row;
1054         u32     aio_handle;
1055         u64     disk_block;
1056         u32     disk_block_cnt;
1057         u8      cdb[16];
1058         u8      cdb_length;
1059
1060         /* RAID 1 specific */
1061 #define NUM_RAID1_MAP_ENTRIES   3
1062         u32     num_it_nexus_entries;
1063         u32     it_nexus[NUM_RAID1_MAP_ENTRIES];
1064
1065         /* RAID 5 / RAID 6 specific */
1066         u32     p_parity_it_nexus;      /* aio_handle */
1067         u32     q_parity_it_nexus;      /* aio_handle */
1068         u8      xor_mult;
1069         u64     row;
1070         u64     stripe_lba;
1071         u32     p_index;
1072         u32     q_index;
1073 };
1074
1075 #define RAID_CTLR_LUNID         "\0\0\0\0\0\0\0\0"
1076
1077 #define NUM_STREAMS_PER_LUN     8
1078
1079 struct pqi_stream_data {
1080         u64     next_lba;
1081         u32     last_accessed;
1082 };
1083
1084 struct pqi_scsi_dev {
1085         int     devtype;                /* as reported by INQUIRY command */
1086         u8      device_type;            /* as reported by */
1087                                         /* BMIC_IDENTIFY_PHYSICAL_DEVICE */
1088                                         /* only valid for devtype = TYPE_DISK */
1089         int     bus;
1090         int     target;
1091         int     lun;
1092         u8      scsi3addr[8];
1093         u8      wwid[16];
1094         u8      volume_id[16];
1095         u8      is_physical_device : 1;
1096         u8      is_external_raid_device : 1;
1097         u8      is_expander_smp_device : 1;
1098         u8      target_lun_valid : 1;
1099         u8      device_gone : 1;
1100         u8      new_device : 1;
1101         u8      keep_device : 1;
1102         u8      volume_offline : 1;
1103         u8      rescan : 1;
1104         u8      ignore_device : 1;
1105         bool    aio_enabled;            /* only valid for physical disks */
1106         bool    in_remove;
1107         bool    device_offline;
1108         u8      vendor[8];              /* bytes 8-15 of inquiry data */
1109         u8      model[16];              /* bytes 16-31 of inquiry data */
1110         u64     sas_address;
1111         u8      raid_level;
1112         u16     queue_depth;            /* max. queue_depth for this device */
1113         u16     advertised_queue_depth;
1114         u32     aio_handle;
1115         u8      volume_status;
1116         u8      active_path_index;
1117         u8      path_map;
1118         u8      bay;
1119         u8      box_index;
1120         u8      phys_box_on_bus;
1121         u8      phy_connected_dev_type;
1122         u8      box[8];
1123         u16     phys_connector[8];
1124         u8      phy_id;
1125         u8      ncq_prio_enable;
1126         u8      ncq_prio_support;
1127         bool    raid_bypass_configured; /* RAID bypass configured */
1128         bool    raid_bypass_enabled;    /* RAID bypass enabled */
1129         u32     next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
1130         struct raid_map *raid_map;      /* RAID bypass map */
1131         u32     max_transfer_encrypted;
1132
1133         struct pqi_sas_port *sas_port;
1134         struct scsi_device *sdev;
1135
1136         struct list_head scsi_device_list_entry;
1137         struct list_head new_device_list_entry;
1138         struct list_head add_list_entry;
1139         struct list_head delete_list_entry;
1140
1141         struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
1142         atomic_t scsi_cmds_outstanding;
1143         atomic_t raid_bypass_cnt;
1144 };
1145
1146 /* VPD inquiry pages */
1147 #define CISS_VPD_LV_DEVICE_GEOMETRY     0xc1    /* vendor-specific page */
1148 #define CISS_VPD_LV_BYPASS_STATUS       0xc2    /* vendor-specific page */
1149 #define CISS_VPD_LV_STATUS              0xc3    /* vendor-specific page */
1150
1151 #define VPD_PAGE        (1 << 8)
1152
1153 #pragma pack(1)
1154
1155 /* structure for CISS_VPD_LV_STATUS */
1156 struct ciss_vpd_logical_volume_status {
1157         u8      peripheral_info;
1158         u8      page_code;
1159         u8      reserved;
1160         u8      page_length;
1161         u8      volume_status;
1162         u8      reserved2[3];
1163         __be32  flags;
1164 };
1165
1166 #pragma pack()
1167
1168 /* constants for volume_status field of ciss_vpd_logical_volume_status */
1169 #define CISS_LV_OK                                      0
1170 #define CISS_LV_FAILED                                  1
1171 #define CISS_LV_NOT_CONFIGURED                          2
1172 #define CISS_LV_DEGRADED                                3
1173 #define CISS_LV_READY_FOR_RECOVERY                      4
1174 #define CISS_LV_UNDERGOING_RECOVERY                     5
1175 #define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED           6
1176 #define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM       7
1177 #define CISS_LV_HARDWARE_OVERHEATING                    8
1178 #define CISS_LV_HARDWARE_HAS_OVERHEATED                 9
1179 #define CISS_LV_UNDERGOING_EXPANSION                    10
1180 #define CISS_LV_NOT_AVAILABLE                           11
1181 #define CISS_LV_QUEUED_FOR_EXPANSION                    12
1182 #define CISS_LV_DISABLED_SCSI_ID_CONFLICT               13
1183 #define CISS_LV_EJECTED                                 14
1184 #define CISS_LV_UNDERGOING_ERASE                        15
1185 /* state 16 not used */
1186 #define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD      17
1187 #define CISS_LV_UNDERGOING_RPI                          18
1188 #define CISS_LV_PENDING_RPI                             19
1189 #define CISS_LV_ENCRYPTED_NO_KEY                        20
1190 /* state 21 not used */
1191 #define CISS_LV_UNDERGOING_ENCRYPTION                   22
1192 #define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING          23
1193 #define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER   24
1194 #define CISS_LV_PENDING_ENCRYPTION                      25
1195 #define CISS_LV_PENDING_ENCRYPTION_REKEYING             26
1196 #define CISS_LV_NOT_SUPPORTED                           27
1197 #define CISS_LV_STATUS_UNAVAILABLE                      255
1198
1199 /* constants for flags field of ciss_vpd_logical_volume_status */
1200 #define CISS_LV_FLAGS_NO_HOST_IO        0x1     /* volume not available for */
1201                                                 /* host I/O */
1202
1203 /* for SAS hosts and SAS expanders */
1204 struct pqi_sas_node {
1205         struct device *parent_dev;
1206         struct list_head port_list_head;
1207 };
1208
1209 struct pqi_sas_port {
1210         struct list_head port_list_entry;
1211         u64     sas_address;
1212         struct pqi_scsi_dev *device;
1213         struct sas_port *port;
1214         int     next_phy_index;
1215         struct list_head phy_list_head;
1216         struct pqi_sas_node *parent_node;
1217         struct sas_rphy *rphy;
1218 };
1219
1220 struct pqi_sas_phy {
1221         struct list_head phy_list_entry;
1222         struct sas_phy *phy;
1223         struct pqi_sas_port *parent_port;
1224         bool    added_to_port;
1225 };
1226
1227 struct pqi_io_request {
1228         atomic_t        refcount;
1229         u16             index;
1230         void (*io_complete_callback)(struct pqi_io_request *io_request,
1231                 void *context);
1232         void            *context;
1233         u8              raid_bypass : 1;
1234         int             status;
1235         struct pqi_queue_group *queue_group;
1236         struct scsi_cmnd *scmd;
1237         void            *error_info;
1238         struct pqi_sg_descriptor *sg_chain_buffer;
1239         dma_addr_t      sg_chain_buffer_dma_handle;
1240         void            *iu;
1241         struct list_head request_list_entry;
1242 };
1243
1244 #define PQI_NUM_SUPPORTED_EVENTS        7
1245
1246 struct pqi_event {
1247         bool    pending;
1248         u8      event_type;
1249         u16     event_id;
1250         u32     additional_event_id;
1251 };
1252
1253 #define PQI_RESERVED_IO_SLOTS_LUN_RESET                 1
1254 #define PQI_RESERVED_IO_SLOTS_EVENT_ACK                 PQI_NUM_SUPPORTED_EVENTS
1255 #define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS      3
1256 #define PQI_RESERVED_IO_SLOTS                           \
1257         (PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
1258         PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
1259
1260 #define PQI_CTRL_PRODUCT_ID_GEN1        0
1261 #define PQI_CTRL_PRODUCT_ID_GEN2        7
1262 #define PQI_CTRL_PRODUCT_REVISION_A     0
1263 #define PQI_CTRL_PRODUCT_REVISION_B     1
1264
1265 struct pqi_ctrl_info {
1266         unsigned int    ctrl_id;
1267         struct pci_dev  *pci_dev;
1268         char            firmware_version[32];
1269         char            serial_number[17];
1270         char            model[17];
1271         char            vendor[9];
1272         u8              product_id;
1273         u8              product_revision;
1274         void __iomem    *iomem_base;
1275         struct pqi_ctrl_registers __iomem *registers;
1276         struct pqi_device_registers __iomem *pqi_registers;
1277         u32             max_sg_entries;
1278         u32             config_table_offset;
1279         u32             config_table_length;
1280         u16             max_inbound_queues;
1281         u16             max_elements_per_iq;
1282         u16             max_iq_element_length;
1283         u16             max_outbound_queues;
1284         u16             max_elements_per_oq;
1285         u16             max_oq_element_length;
1286         u32             max_transfer_size;
1287         u32             max_outstanding_requests;
1288         u32             max_io_slots;
1289         unsigned int    scsi_ml_can_queue;
1290         unsigned short  sg_tablesize;
1291         unsigned int    max_sectors;
1292         u32             error_buffer_length;
1293         void            *error_buffer;
1294         dma_addr_t      error_buffer_dma_handle;
1295         size_t          sg_chain_buffer_length;
1296         unsigned int    num_queue_groups;
1297         u16             max_hw_queue_index;
1298         u16             num_elements_per_iq;
1299         u16             num_elements_per_oq;
1300         u16             max_inbound_iu_length_per_firmware;
1301         u16             max_inbound_iu_length;
1302         unsigned int    max_sg_per_iu;
1303         unsigned int    max_sg_per_r56_iu;
1304         void            *admin_queue_memory_base;
1305         u32             admin_queue_memory_length;
1306         dma_addr_t      admin_queue_memory_base_dma_handle;
1307         void            *queue_memory_base;
1308         u32             queue_memory_length;
1309         dma_addr_t      queue_memory_base_dma_handle;
1310         struct pqi_admin_queues admin_queues;
1311         struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
1312         struct pqi_event_queue event_queue;
1313         enum pqi_irq_mode irq_mode;
1314         int             max_msix_vectors;
1315         int             num_msix_vectors_enabled;
1316         int             num_msix_vectors_initialized;
1317         int             event_irq;
1318         struct Scsi_Host *scsi_host;
1319
1320         struct mutex    scan_mutex;
1321         struct mutex    lun_reset_mutex;
1322         bool            controller_online;
1323         bool            block_requests;
1324         bool            scan_blocked;
1325         u8              logical_volume_rescan_needed : 1;
1326         u8              inbound_spanning_supported : 1;
1327         u8              outbound_spanning_supported : 1;
1328         u8              pqi_mode_enabled : 1;
1329         u8              pqi_reset_quiesce_supported : 1;
1330         u8              soft_reset_handshake_supported : 1;
1331         u8              raid_iu_timeout_supported : 1;
1332         u8              tmf_iu_timeout_supported : 1;
1333         u8              firmware_triage_supported : 1;
1334         u8              rpl_extended_format_4_5_supported : 1;
1335         u8              enable_r1_writes : 1;
1336         u8              enable_r5_writes : 1;
1337         u8              enable_r6_writes : 1;
1338         u8              lv_drive_type_mix_valid : 1;
1339         u8              enable_stream_detection : 1;
1340
1341         u8              ciss_report_log_flags;
1342         u32             max_transfer_encrypted_sas_sata;
1343         u32             max_transfer_encrypted_nvme;
1344         u32             max_write_raid_5_6;
1345         u32             max_write_raid_1_10_2drive;
1346         u32             max_write_raid_1_10_3drive;
1347
1348         struct list_head scsi_device_list;
1349         spinlock_t      scsi_device_list_lock;
1350
1351         struct delayed_work rescan_work;
1352         struct delayed_work update_time_work;
1353
1354         struct pqi_sas_node *sas_host;
1355         u64             sas_address;
1356
1357         struct pqi_io_request *io_request_pool;
1358         u16             next_io_request_slot;
1359
1360         struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS];
1361         struct work_struct event_work;
1362
1363         atomic_t        num_interrupts;
1364         int             previous_num_interrupts;
1365         u32             previous_heartbeat_count;
1366         __le32 __iomem  *heartbeat_counter;
1367         u8 __iomem      *soft_reset_status;
1368         struct timer_list heartbeat_timer;
1369         struct work_struct ctrl_offline_work;
1370
1371         struct semaphore sync_request_sem;
1372         atomic_t        num_busy_threads;
1373         atomic_t        num_blocked_threads;
1374         wait_queue_head_t block_requests_wait;
1375
1376         struct mutex    ofa_mutex;
1377         struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
1378         dma_addr_t      pqi_ofa_mem_dma_handle;
1379         void            **pqi_ofa_chunk_virt_addr;
1380         struct work_struct ofa_memory_alloc_work;
1381         struct work_struct ofa_quiesce_work;
1382         u32             ofa_bytes_requested;
1383         u16             ofa_cancel_reason;
1384 };
1385
1386 enum pqi_ctrl_mode {
1387         SIS_MODE = 0,
1388         PQI_MODE
1389 };
1390
1391 /*
1392  * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
1393  */
1394 #define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH       27
1395
1396 /* CISS commands */
1397 #define CISS_READ               0xc0
1398 #define CISS_REPORT_LOG         0xc2    /* Report Logical LUNs */
1399 #define CISS_REPORT_PHYS        0xc3    /* Report Physical LUNs */
1400 #define CISS_GET_RAID_MAP       0xc8
1401
1402 /* BMIC commands */
1403 #define BMIC_IDENTIFY_CONTROLLER                0x11
1404 #define BMIC_IDENTIFY_PHYSICAL_DEVICE           0x15
1405 #define BMIC_READ                               0x26
1406 #define BMIC_WRITE                              0x27
1407 #define BMIC_SENSE_FEATURE                      0x61
1408 #define BMIC_SENSE_CONTROLLER_PARAMETERS        0x64
1409 #define BMIC_SENSE_SUBSYSTEM_INFORMATION        0x66
1410 #define BMIC_CSMI_PASSTHRU                      0x68
1411 #define BMIC_WRITE_HOST_WELLNESS                0xa5
1412 #define BMIC_FLUSH_CACHE                        0xc2
1413 #define BMIC_SET_DIAG_OPTIONS                   0xf4
1414 #define BMIC_SENSE_DIAG_OPTIONS                 0xf5
1415
1416 #define CSMI_CC_SAS_SMP_PASSTHRU                0x17
1417
1418 #define SA_FLUSH_CACHE                          0x1
1419
1420 #define MASKED_DEVICE(lunid)                    ((lunid)[3] & 0xc0)
1421 #define CISS_GET_LEVEL_2_BUS(lunid)             ((lunid)[7] & 0x3f)
1422 #define CISS_GET_LEVEL_2_TARGET(lunid)          ((lunid)[6])
1423 #define CISS_GET_DRIVE_NUMBER(lunid)            \
1424         (((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
1425         CISS_GET_LEVEL_2_TARGET((lunid)))
1426
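/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * macros above pick apart the 8-byte CISS LUN ID.  Byte 3 carries the
 * "masked device" bits, byte 6 the level-2 target, and the low 6 bits of
 * byte 7 the level-2 bus, from which a drive number is derived.
 */
static inline void pqi_example_decode_lunid(const u8 *lunid,
        u8 *bus, u8 *target, u16 *drive_number)
{
        *bus = CISS_GET_LEVEL_2_BUS(lunid);
        *target = CISS_GET_LEVEL_2_TARGET(lunid);
        *drive_number = CISS_GET_DRIVE_NUMBER(lunid);
}
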
1427 #define LV_GET_DRIVE_TYPE_MIX(lunid)            ((lunid)[6])
1428
1429 #define LV_DRIVE_TYPE_MIX_UNKNOWN               0
1430 #define LV_DRIVE_TYPE_MIX_NO_RESTRICTION        1
1431 #define LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY          2
1432 #define LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY         3
1433 #define LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY  4
1434 #define LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY          5
1435 #define LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY         6
1436 #define LV_DRIVE_TYPE_MIX_SAS_ONLY              7
1437 #define LV_DRIVE_TYPE_MIX_SATA_ONLY             8
1438 #define LV_DRIVE_TYPE_MIX_NVME_ONLY             9
1439
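/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * drive-type-mix codes above can be grouped, for example to ask whether
 * every member drive of a logical volume is an SSD.
 */
static inline bool pqi_example_lv_is_all_ssd(u8 drive_type_mix)
{
        switch (drive_type_mix) {
        case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
        case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
        case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
        case LV_DRIVE_TYPE_MIX_NVME_ONLY:
                return true;
        default:
                return false;
        }
}
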
1440 #define NO_TIMEOUT              ((unsigned long) -1)
1441
1442 #pragma pack(1)
1443
1444 struct bmic_identify_controller {
1445         u8      configured_logical_drive_count;
1446         __le32  configuration_signature;
1447         u8      firmware_version_short[4];
1448         u8      reserved[145];
1449         __le16  extended_logical_unit_count;
1450         u8      reserved1[34];
1451         __le16  firmware_build_number;
1452         u8      reserved2[8];
1453         u8      vendor_id[8];
1454         u8      product_id[16];
1455         u8      reserved3[62];
1456         __le32  extra_controller_flags;
1457         u8      reserved4[2];
1458         u8      controller_mode;
1459         u8      spare_part_number[32];
1460         u8      firmware_version_long[32];
1461 };
1462
1463 /* constants for extra_controller_flags field of bmic_identify_controller */
1464 #define BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED     0x20000000
1465
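/*
 * Illustrative sketch (hypothetical helper, not part of the driver): choosing
 * between the short and long firmware version strings advertised by
 * BMIC_IDENTIFY_CONTROLLER, based on extra_controller_flags.
 */
static inline const u8 *pqi_example_ctrl_fw_version(
        const struct bmic_identify_controller *id, size_t *length)
{
        if (le32_to_cpu(id->extra_controller_flags) &
                BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
                *length = sizeof(id->firmware_version_long);
                return id->firmware_version_long;
        }

        *length = sizeof(id->firmware_version_short);
        return id->firmware_version_short;
}
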
1466 struct bmic_sense_subsystem_info {
1467         u8      reserved[44];
1468         u8      ctrl_serial_number[16];
1469 };
1470
1471 /* constants for device_type field */
1472 #define SA_DEVICE_TYPE_SATA             0x1
1473 #define SA_DEVICE_TYPE_SAS              0x2
1474 #define SA_DEVICE_TYPE_EXPANDER_SMP     0x5
1475 #define SA_DEVICE_TYPE_SES              0x6
1476 #define SA_DEVICE_TYPE_CONTROLLER       0x7
1477 #define SA_DEVICE_TYPE_NVME             0x9
1478
1479 struct bmic_identify_physical_device {
1480         u8      scsi_bus;               /* SCSI Bus number on controller */
1481         u8      scsi_id;                /* SCSI ID on this bus */
1482         __le16  block_size;             /* sector size in bytes */
1483         __le32  total_blocks;           /* number of sectors on drive */
1484         __le32  reserved_blocks;        /* controller reserved (RIS) */
1485         u8      model[40];              /* Physical Drive Model */
1486         u8      serial_number[40];      /* Drive Serial Number */
1487         u8      firmware_revision[8];   /* drive firmware revision */
1488         u8      scsi_inquiry_bits;      /* inquiry byte 7 bits */
1489         u8      compaq_drive_stamp;     /* 0 means drive not stamped */
1490         u8      last_failure_reason;
1491         u8      flags;
1492         u8      more_flags;
1493         u8      scsi_lun;               /* SCSI LUN for phys drive */
1494         u8      yet_more_flags;
1495         u8      even_more_flags;
1496         __le32  spi_speed_rules;
1497         u8      phys_connector[2];      /* connector number on controller */
1498         u8      phys_box_on_bus;        /* phys enclosure this drive resides in */
1499         u8      phys_bay_in_box;        /* phys drive bay this drive resides in */
1500         __le32  rpm;                    /* drive rotational speed in RPM */
1501         u8      device_type;            /* type of drive */
1502         u8      sata_version;           /* only valid when device_type = */
1503                                         /* SA_DEVICE_TYPE_SATA */
1504         __le64  big_total_block_count;
1505         __le64  ris_starting_lba;
1506         __le32  ris_size;
1507         u8      wwid[20];
1508         u8      controller_phy_map[32];
1509         __le16  phy_count;
1510         u8      phy_connected_dev_type[256];
1511         u8      phy_to_drive_bay_num[256];
1512         __le16  phy_to_attached_dev_index[256];
1513         u8      box_index;
1514         u8      reserved;
1515         __le16  extra_physical_drive_flags;
1516         u8      negotiated_link_rate[256];
1517         u8      phy_to_phy_map[256];
1518         u8      redundant_path_present_map;
1519         u8      redundant_path_failure_map;
1520         u8      active_path_number;
1521         __le16  alternate_paths_phys_connector[8];
1522         u8      alternate_paths_phys_box_on_port[8];
1523         u8      multi_lun_device_lun_count;
1524         u8      minimum_good_fw_revision[8];
1525         u8      unique_inquiry_bytes[20];
1526         u8      current_temperature_degrees;
1527         u8      temperature_threshold_degrees;
1528         u8      max_temperature_degrees;
1529         u8      logical_blocks_per_phys_block_exp;
1530         __le16  current_queue_depth_limit;
1531         u8      switch_name[10];
1532         __le16  switch_port;
1533         u8      alternate_paths_switch_name[40];
1534         u8      alternate_paths_switch_port[8];
1535         __le16  power_on_hours;
1536         __le16  percent_endurance_used;
1537         u8      drive_authentication;
1538         u8      smart_carrier_authentication;
1539         u8      smart_carrier_app_fw_version;
1540         u8      smart_carrier_bootloader_fw_version;
1541         u8      sanitize_flags;
1542         u8      encryption_key_flags;
1543         u8      encryption_key_name[64];
1544         __le32  misc_drive_flags;
1545         __le16  dek_index;
1546         __le16  hba_drive_encryption_flags;
1547         __le16  max_overwrite_time;
1548         __le16  max_block_erase_time;
1549         __le16  max_crypto_erase_time;
1550         u8      connector_info[5];
1551         u8      connector_name[8][8];
1552         u8      page_83_identifier[16];
1553         u8      maximum_link_rate[256];
1554         u8      negotiated_physical_link_rate[256];
1555         u8      box_connector_name[8];
1556         u8      padding_to_multiple_of_512[9];
1557 };
1558
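/*
 * Illustrative sketch (hypothetical helper, not part of the driver): deriving
 * a physical drive's capacity in bytes from the identify data above.  The
 * assumption here is that the 64-bit big_total_block_count is authoritative
 * and the 32-bit total_blocks is a legacy fallback.
 */
static inline u64 pqi_example_phys_device_capacity(
        const struct bmic_identify_physical_device *id_phys)
{
        u64 block_count = le64_to_cpu(id_phys->big_total_block_count);

        if (block_count == 0)
                block_count = le32_to_cpu(id_phys->total_blocks);

        return block_count * le16_to_cpu(id_phys->block_size);
}
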
1559 #define BMIC_SENSE_FEATURE_IO_PAGE              0x8
1560 #define BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE  0x2
1561
1562 struct bmic_sense_feature_buffer_header {
1563         u8      page_code;
1564         u8      subpage_code;
1565         __le16  buffer_length;
1566 };
1567
1568 struct bmic_sense_feature_page_header {
1569         u8      page_code;
1570         u8      subpage_code;
1571         __le16  page_length;
1572 };
1573
1574 struct bmic_sense_feature_io_page_aio_subpage {
1575         struct bmic_sense_feature_page_header header;
1576         u8      firmware_read_support;
1577         u8      driver_read_support;
1578         u8      firmware_write_support;
1579         u8      driver_write_support;
1580         __le16  max_transfer_encrypted_sas_sata;
1581         __le16  max_transfer_encrypted_nvme;
1582         __le16  max_write_raid_5_6;
1583         __le16  max_write_raid_1_10_2drive;
1584         __le16  max_write_raid_1_10_3drive;
1585 };
1586
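/*
 * Illustrative sketch (hypothetical helper, not the driver's validation
 * logic): confirming that a BMIC_SENSE_FEATURE response actually carries the
 * I/O page AIO subpage before its limits are consumed.  A real caller would
 * also check buffer_length against the expected subpage size.
 */
static inline bool pqi_example_is_aio_subpage(
        const struct bmic_sense_feature_buffer_header *header)
{
        return header->page_code == BMIC_SENSE_FEATURE_IO_PAGE &&
                header->subpage_code == BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
}
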
1587 struct bmic_smp_request {
1588         u8      frame_type;
1589         u8      function;
1590         u8      allocated_response_length;
1591         u8      request_length;
1592         u8      additional_request_bytes[1016];
1593 };
1594
1595 struct bmic_smp_response {
1596         u8      frame_type;
1597         u8      function;
1598         u8      function_result;
1599         u8      response_length;
1600         u8      additional_response_bytes[1016];
1601 };
1602
1603 struct bmic_csmi_ioctl_header {
1604         __le32  header_length;
1605         u8      signature[8];
1606         __le32  timeout;
1607         __le32  control_code;
1608         __le32  return_code;
1609         __le32  length;
1610 };
1611
1612 struct bmic_csmi_smp_passthru {
1613         u8      phy_identifier;
1614         u8      port_identifier;
1615         u8      connection_rate;
1616         u8      reserved;
1617         __be64  destination_sas_address;
1618         __le32  request_length;
1619         struct bmic_smp_request request;
1620         u8      connection_status;
1621         u8      reserved1[3];
1622         __le32  response_length;
1623         struct bmic_smp_response response;
1624 };
1625
1626 struct bmic_csmi_smp_passthru_buffer {
1627         struct bmic_csmi_ioctl_header ioctl_header;
1628         struct bmic_csmi_smp_passthru parameters;
1629 };
1630
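/*
 * Illustrative sketch (not the driver's implementation; field usage is
 * assumed from the layouts above): initializing the CSMI ioctl header of an
 * SMP passthrough buffer.  The 60-second timeout is an arbitrary example.
 */
static inline void pqi_example_init_csmi_smp_header(
        struct bmic_csmi_smp_passthru_buffer *buffer)
{
        memset(buffer, 0, sizeof(*buffer));
        buffer->ioctl_header.header_length =
                cpu_to_le32(sizeof(buffer->ioctl_header));
        buffer->ioctl_header.control_code =
                cpu_to_le32(CSMI_CC_SAS_SMP_PASSTHRU);
        buffer->ioctl_header.timeout = cpu_to_le32(60);
        buffer->ioctl_header.length = cpu_to_le32(sizeof(buffer->parameters));
}
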
1631 struct bmic_flush_cache {
1632         u8      disable_flag;
1633         u8      system_power_action;
1634         u8      ndu_flush;
1635         u8      shutdown_event;
1636         u8      reserved[28];
1637 };
1638
1639 /* for shutdown_event member of struct bmic_flush_cache */
1640 enum bmic_flush_cache_shutdown_event {
1641         NONE_CACHE_FLUSH_ONLY = 0,
1642         SHUTDOWN = 1,
1643         HIBERNATE = 2,
1644         SUSPEND = 3,
1645         RESTART = 4
1646 };
1647
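/*
 * Illustrative sketch (hypothetical helper, not part of the driver): filling
 * in a flush-cache request for a given shutdown event; the remaining fields
 * are left zeroed.
 */
static inline void pqi_example_prepare_flush_cache(
        struct bmic_flush_cache *flush_cache,
        enum bmic_flush_cache_shutdown_event shutdown_event)
{
        memset(flush_cache, 0, sizeof(*flush_cache));
        flush_cache->shutdown_event = shutdown_event;
}
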
1648 struct bmic_diag_options {
1649         __le32 options;
1650 };
1651
1652 #pragma pack()
1653
1654 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
1655 {
1656         void *hostdata = shost_priv(shost);
1657
1658         return *((struct pqi_ctrl_info **)hostdata);
1659 }
1660
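/*
 * Illustrative sketch (not the driver's init code): storing the controller
 * pointer in the Scsi_Host private area so that shost_to_hba() above can
 * recover it.  Assumes the host was allocated with hostdata room for one
 * pointer, e.g. scsi_host_alloc(template, sizeof(struct pqi_ctrl_info *)).
 */
static inline void pqi_example_set_hba(struct Scsi_Host *shost,
        struct pqi_ctrl_info *ctrl_info)
{
        struct pqi_ctrl_info **hostdata = shost_priv(shost);

        *hostdata = ctrl_info;
}
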
1661 void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
1662         struct sas_rphy *rphy);
1663
1664 int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
1665 void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
1666 int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
1667         struct pqi_scsi_dev *device);
1668 void pqi_remove_sas_device(struct pqi_scsi_dev *device);
1669 struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
1670         struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
1671 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
1672 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
1673         struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
1674         struct pqi_raid_error_info *error_info);
1675
1676 extern struct sas_function_template pqi_sas_transport_functions;
1677
1678 #endif /* _SMARTPQI_H */