Merge branch 'work.file' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[linux-2.6-microblaze.git] / drivers / scsi / smartpqi / smartpqi.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *    driver for Microsemi PQI-based storage controllers
4  *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
5  *    Copyright (c) 2016-2018 Microsemi Corporation
6  *    Copyright (c) 2016 PMC-Sierra, Inc.
7  *
8  *    Questions/Comments/Bugfixes to storagedev@microchip.com
9  *
10  */
11
12 #include <linux/io-64-nonatomic-lo-hi.h>
13
14 #if !defined(_SMARTPQI_H)
15 #define _SMARTPQI_H
16
17 #include <scsi/scsi_host.h>
18 #include <linux/bsg-lib.h>
19
20 #pragma pack(1)
21
22 #define PQI_DEVICE_SIGNATURE    "PQI DREG"
23
24 /* This structure is defined by the PQI specification. */
25 struct pqi_device_registers {
26         __le64  signature;
27         u8      function_and_status_code;
28         u8      reserved[7];
29         u8      max_admin_iq_elements;
30         u8      max_admin_oq_elements;
31         u8      admin_iq_element_length;        /* in 16-byte units */
32         u8      admin_oq_element_length;        /* in 16-byte units */
33         __le16  max_reset_timeout;              /* in 100-millisecond units */
34         u8      reserved1[2];
35         __le32  legacy_intx_status;
36         __le32  legacy_intx_mask_set;
37         __le32  legacy_intx_mask_clear;
38         u8      reserved2[28];
39         __le32  device_status;
40         u8      reserved3[4];
41         __le64  admin_iq_pi_offset;
42         __le64  admin_oq_ci_offset;
43         __le64  admin_iq_element_array_addr;
44         __le64  admin_oq_element_array_addr;
45         __le64  admin_iq_ci_addr;
46         __le64  admin_oq_pi_addr;
47         u8      admin_iq_num_elements;
48         u8      admin_oq_num_elements;
49         __le16  admin_queue_int_msg_num;
50         u8      reserved4[4];
51         __le32  device_error;
52         u8      reserved5[4];
53         __le64  error_details;
54         __le32  device_reset;
55         __le32  power_action;
56         u8      reserved6[104];
57 };
58
59 /*
60  * controller registers
61  *
62  * These are defined by the Microsemi implementation.
63  *
64  * Some registers (those named sis_*) are only used when in
65  * legacy SIS mode before we transition the controller into
66  * PQI mode.  There are a number of other SIS mode registers,
67  * but we don't use them, so only the SIS registers that we
68  * care about are defined here.  The offsets mentioned in the
69  * comments are the offsets from the PCIe BAR 0.
70  */
71 struct pqi_ctrl_registers {
72         u8      reserved[0x20];
73         __le32  sis_host_to_ctrl_doorbell;              /* 20h */
74         u8      reserved1[0x34 - (0x20 + sizeof(__le32))];
75         __le32  sis_interrupt_mask;                     /* 34h */
76         u8      reserved2[0x9c - (0x34 + sizeof(__le32))];
77         __le32  sis_ctrl_to_host_doorbell;              /* 9Ch */
78         u8      reserved3[0xa0 - (0x9c + sizeof(__le32))];
79         __le32  sis_ctrl_to_host_doorbell_clear;        /* A0h */
80         u8      reserved4[0xb0 - (0xa0 + sizeof(__le32))];
81         __le32  sis_driver_scratch;                     /* B0h */
82         __le32  sis_product_identifier;                 /* B4h */
83         u8      reserved5[0xbc - (0xb4 + sizeof(__le32))];
84         __le32  sis_firmware_status;                    /* BCh */
85         u8      reserved6[0x1000 - (0xbc + sizeof(__le32))];
86         __le32  sis_mailbox[8];                         /* 1000h */
87         u8      reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
88         /*
89          * The PQI spec states that the PQI registers should be at
90          * offset 0 from the PCIe BAR 0.  However, we can't map
91          * them at offset 0 because that would break compatibility
92          * with the SIS registers.  So we map them at offset 4000h.
93          */
94         struct pqi_device_registers pqi_registers;      /* 4000h */
95 };
96
97 #if ((HZ) < 1000)
98 #define PQI_HZ  1000
99 #else
100 #define PQI_HZ  (HZ)
101 #endif
102
103 #define PQI_DEVICE_REGISTERS_OFFSET     0x4000
104
105 enum pqi_io_path {
106         RAID_PATH = 0,
107         AIO_PATH = 1
108 };
109
110 enum pqi_irq_mode {
111         IRQ_MODE_NONE,
112         IRQ_MODE_INTX,
113         IRQ_MODE_MSIX
114 };
115
116 struct pqi_sg_descriptor {
117         __le64  address;
118         __le32  length;
119         __le32  flags;
120 };
121
122 /* manifest constants for the flags field of pqi_sg_descriptor */
123 #define CISS_SG_LAST    0x40000000
124 #define CISS_SG_CHAIN   0x80000000
125
126 struct pqi_iu_header {
127         u8      iu_type;
128         u8      reserved;
129         __le16  iu_length;      /* in bytes - does not include the length */
130                                 /* of this header */
131         __le16  response_queue_id;      /* specifies the OQ where the */
132                                         /* response IU is to be delivered */
133         u16     driver_flags;   /* reserved for driver use */
134 };
135
136 /* manifest constants for pqi_iu_header.driver_flags */
137 #define PQI_DRIVER_NONBLOCKABLE_REQUEST         0x1
138
139 /*
140  * According to the PQI spec, the IU header is only the first 4 bytes of our
141  * pqi_iu_header structure.
142  */
143 #define PQI_REQUEST_HEADER_LENGTH       4
144
145 struct pqi_general_admin_request {
146         struct pqi_iu_header header;
147         __le16  request_id;
148         u8      function_code;
149         union {
150                 struct {
151                         u8      reserved[33];
152                         __le32  buffer_length;
153                         struct pqi_sg_descriptor sg_descriptor;
154                 } report_device_capability;
155
156                 struct {
157                         u8      reserved;
158                         __le16  queue_id;
159                         u8      reserved1[2];
160                         __le64  element_array_addr;
161                         __le64  ci_addr;
162                         __le16  num_elements;
163                         __le16  element_length;
164                         u8      queue_protocol;
165                         u8      reserved2[23];
166                         __le32  vendor_specific;
167                 } create_operational_iq;
168
169                 struct {
170                         u8      reserved;
171                         __le16  queue_id;
172                         u8      reserved1[2];
173                         __le64  element_array_addr;
174                         __le64  pi_addr;
175                         __le16  num_elements;
176                         __le16  element_length;
177                         u8      queue_protocol;
178                         u8      reserved2[3];
179                         __le16  int_msg_num;
180                         __le16  coalescing_count;
181                         __le32  min_coalescing_time;
182                         __le32  max_coalescing_time;
183                         u8      reserved3[8];
184                         __le32  vendor_specific;
185                 } create_operational_oq;
186
187                 struct {
188                         u8      reserved;
189                         __le16  queue_id;
190                         u8      reserved1[50];
191                 } delete_operational_queue;
192
193                 struct {
194                         u8      reserved;
195                         __le16  queue_id;
196                         u8      reserved1[46];
197                         __le32  vendor_specific;
198                 } change_operational_iq_properties;
199
200         } data;
201 };
202
203 struct pqi_general_admin_response {
204         struct pqi_iu_header header;
205         __le16  request_id;
206         u8      function_code;
207         u8      status;
208         union {
209                 struct {
210                         u8      status_descriptor[4];
211                         __le64  iq_pi_offset;
212                         u8      reserved[40];
213                 } create_operational_iq;
214
215                 struct {
216                         u8      status_descriptor[4];
217                         __le64  oq_ci_offset;
218                         u8      reserved[40];
219                 } create_operational_oq;
220         } data;
221 };
222
223 struct pqi_iu_layer_descriptor {
224         u8      inbound_spanning_supported : 1;
225         u8      reserved : 7;
226         u8      reserved1[5];
227         __le16  max_inbound_iu_length;
228         u8      outbound_spanning_supported : 1;
229         u8      reserved2 : 7;
230         u8      reserved3[5];
231         __le16  max_outbound_iu_length;
232 };
233
234 struct pqi_device_capability {
235         __le16  data_length;
236         u8      reserved[6];
237         u8      iq_arbitration_priority_support_bitmask;
238         u8      maximum_aw_a;
239         u8      maximum_aw_b;
240         u8      maximum_aw_c;
241         u8      max_arbitration_burst : 3;
242         u8      reserved1 : 4;
243         u8      iqa : 1;
244         u8      reserved2[2];
245         u8      iq_freeze : 1;
246         u8      reserved3 : 7;
247         __le16  max_inbound_queues;
248         __le16  max_elements_per_iq;
249         u8      reserved4[4];
250         __le16  max_iq_element_length;
251         __le16  min_iq_element_length;
252         u8      reserved5[2];
253         __le16  max_outbound_queues;
254         __le16  max_elements_per_oq;
255         __le16  intr_coalescing_time_granularity;
256         __le16  max_oq_element_length;
257         __le16  min_oq_element_length;
258         u8      reserved6[24];
259         struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
260 };
261
262 #define PQI_MAX_EMBEDDED_SG_DESCRIPTORS         4
263 #define PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS     3
264
265 struct pqi_raid_path_request {
266         struct pqi_iu_header header;
267         __le16  request_id;
268         __le16  nexus_id;
269         __le32  buffer_length;
270         u8      lun_number[8];
271         __le16  protocol_specific;
272         u8      data_direction : 2;
273         u8      partial : 1;
274         u8      reserved1 : 4;
275         u8      fence : 1;
276         __le16  error_index;
277         u8      reserved2;
278         u8      task_attribute : 3;
279         u8      command_priority : 4;
280         u8      reserved3 : 1;
281         u8      reserved4 : 2;
282         u8      additional_cdb_bytes_usage : 3;
283         u8      reserved5 : 3;
284         u8      cdb[16];
285         u8      reserved6[12];
286         __le32  timeout;
287         struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
288 };
289
290 struct pqi_aio_path_request {
291         struct pqi_iu_header header;
292         __le16  request_id;
293         u8      reserved1[2];
294         __le32  nexus_id;
295         __le32  buffer_length;
296         u8      data_direction : 2;
297         u8      partial : 1;
298         u8      memory_type : 1;
299         u8      fence : 1;
300         u8      encryption_enable : 1;
301         u8      reserved2 : 2;
302         u8      task_attribute : 3;
303         u8      command_priority : 4;
304         u8      reserved3 : 1;
305         __le16  data_encryption_key_index;
306         __le32  encrypt_tweak_lower;
307         __le32  encrypt_tweak_upper;
308         u8      cdb[16];
309         __le16  error_index;
310         u8      num_sg_descriptors;
311         u8      cdb_length;
312         u8      lun_number[8];
313         u8      reserved4[4];
314         struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
315 };
316
317 #define PQI_RAID1_NVME_XFER_LIMIT       (32 * 1024)     /* 32 KiB */
318
319 struct pqi_aio_r1_path_request {
320         struct pqi_iu_header header;
321         __le16  request_id;
322         __le16  volume_id;      /* ID of the RAID volume */
323         __le32  it_nexus_1;     /* IT nexus of the 1st drive in the RAID volume */
324         __le32  it_nexus_2;     /* IT nexus of the 2nd drive in the RAID volume */
325         __le32  it_nexus_3;     /* IT nexus of the 3rd drive in the RAID volume */
326         __le32  data_length;    /* total bytes to read/write */
327         u8      data_direction : 2;
328         u8      partial : 1;
329         u8      memory_type : 1;
330         u8      fence : 1;
331         u8      encryption_enable : 1;
332         u8      reserved : 2;
333         u8      task_attribute : 3;
334         u8      command_priority : 4;
335         u8      reserved2 : 1;
336         __le16  data_encryption_key_index;
337         u8      cdb[16];
338         __le16  error_index;
339         u8      num_sg_descriptors;
340         u8      cdb_length;
341         u8      num_drives;     /* number of drives in the RAID volume (2 or 3) */
342         u8      reserved3[3];
343         __le32  encrypt_tweak_lower;
344         __le32  encrypt_tweak_upper;
345         struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
346 };
347
348 #define PQI_DEFAULT_MAX_WRITE_RAID_5_6                  (8 * 1024U)
349 #define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA     (~0U)
350 #define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME         (32 * 1024U)
351
352 struct pqi_aio_r56_path_request {
353         struct pqi_iu_header header;
354         __le16  request_id;
355         __le16  volume_id;              /* ID of the RAID volume */
356         __le32  data_it_nexus;          /* IT nexus for the data drive */
357         __le32  p_parity_it_nexus;      /* IT nexus for the P parity drive */
358         __le32  q_parity_it_nexus;      /* IT nexus for the Q parity drive */
359         __le32  data_length;            /* total bytes to read/write */
360         u8      data_direction : 2;
361         u8      partial : 1;
362         u8      mem_type : 1;           /* 0 = PCIe, 1 = DDR */
363         u8      fence : 1;
364         u8      encryption_enable : 1;
365         u8      reserved : 2;
366         u8      task_attribute : 3;
367         u8      command_priority : 4;
368         u8      reserved1 : 1;
369         __le16  data_encryption_key_index;
370         u8      cdb[16];
371         __le16  error_index;
372         u8      num_sg_descriptors;
373         u8      cdb_length;
374         u8      xor_multiplier;
375         u8      reserved2[3];
376         __le32  encrypt_tweak_lower;
377         __le32  encrypt_tweak_upper;
378         __le64  row;                    /* row = logical LBA/blocks per row */
379         u8      reserved3[8];
380         struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS];
381 };
382
383 struct pqi_io_response {
384         struct pqi_iu_header header;
385         __le16  request_id;
386         __le16  error_index;
387         u8      reserved2[4];
388 };
389
390 struct pqi_general_management_request {
391         struct pqi_iu_header header;
392         __le16  request_id;
393         union {
394                 struct {
395                         u8      reserved[2];
396                         __le32  buffer_length;
397                         struct pqi_sg_descriptor sg_descriptors[3];
398                 } report_event_configuration;
399
400                 struct {
401                         __le16  global_event_oq_id;
402                         __le32  buffer_length;
403                         struct pqi_sg_descriptor sg_descriptors[3];
404                 } set_event_configuration;
405         } data;
406 };
407
408 struct pqi_event_descriptor {
409         u8      event_type;
410         u8      reserved;
411         __le16  oq_id;
412 };
413
414 struct pqi_event_config {
415         u8      reserved[2];
416         u8      num_event_descriptors;
417         u8      reserved1;
418         struct pqi_event_descriptor descriptors[1];
419 };
420
421 #define PQI_MAX_EVENT_DESCRIPTORS       255
422
423 #define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0
424 #define PQI_EVENT_OFA_QUIESCE           0x1
425 #define PQI_EVENT_OFA_CANCELED          0x2
426
427 struct pqi_event_response {
428         struct pqi_iu_header header;
429         u8      event_type;
430         u8      reserved2 : 7;
431         u8      request_acknowledge : 1;
432         __le16  event_id;
433         __le32  additional_event_id;
434         union {
435                 struct {
436                         __le32  bytes_requested;
437                         u8      reserved[12];
438                 } ofa_memory_allocation;
439
440                 struct {
441                         __le16  reason;         /* reason for cancellation */
442                         u8      reserved[14];
443                 } ofa_cancelled;
444         } data;
445 };
446
447 struct pqi_event_acknowledge_request {
448         struct pqi_iu_header header;
449         u8      event_type;
450         u8      reserved2;
451         __le16  event_id;
452         __le32  additional_event_id;
453 };
454
455 struct pqi_task_management_request {
456         struct pqi_iu_header header;
457         __le16  request_id;
458         __le16  nexus_id;
459         u8      reserved[2];
460         __le16  timeout;
461         u8      lun_number[8];
462         __le16  protocol_specific;
463         __le16  outbound_queue_id_to_manage;
464         __le16  request_id_to_manage;
465         u8      task_management_function;
466         u8      reserved2 : 7;
467         u8      fence : 1;
468 };
469
470 #define SOP_TASK_MANAGEMENT_LUN_RESET   0x8
471
472 struct pqi_task_management_response {
473         struct pqi_iu_header header;
474         __le16  request_id;
475         __le16  nexus_id;
476         u8      additional_response_info[3];
477         u8      response_code;
478 };
479
480 struct pqi_vendor_general_request {
481         struct pqi_iu_header header;
482         __le16  request_id;
483         __le16  function_code;
484         union {
485                 struct {
486                         __le16  first_section;
487                         __le16  last_section;
488                         u8      reserved[48];
489                 } config_table_update;
490
491                 struct {
492                         __le64  buffer_address;
493                         __le32  buffer_length;
494                         u8      reserved[40];
495                 } ofa_memory_allocation;
496         } data;
497 };
498
499 struct pqi_vendor_general_response {
500         struct pqi_iu_header header;
501         __le16  request_id;
502         __le16  function_code;
503         __le16  status;
504         u8      reserved[2];
505 };
506
507 #define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE  0
508 #define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE   1
509
510 #define PQI_OFA_VERSION                 1
511 #define PQI_OFA_SIGNATURE               "OFA_QRM"
512 #define PQI_OFA_MAX_SG_DESCRIPTORS      64
513
514 struct pqi_ofa_memory {
515         __le64  signature;      /* "OFA_QRM" */
516         __le16  version;        /* version of this struct (1 = 1st version) */
517         u8      reserved[62];
518         __le32  bytes_allocated;        /* total allocated memory in bytes */
519         __le16  num_memory_descriptors;
520         u8      reserved1[2];
521         struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
522 };
523
524 struct pqi_aio_error_info {
525         u8      status;
526         u8      service_response;
527         u8      data_present;
528         u8      reserved;
529         __le32  residual_count;
530         __le16  data_length;
531         __le16  reserved1;
532         u8      data[256];
533 };
534
535 struct pqi_raid_error_info {
536         u8      data_in_result;
537         u8      data_out_result;
538         u8      reserved[3];
539         u8      status;
540         __le16  status_qualifier;
541         __le16  sense_data_length;
542         __le16  response_data_length;
543         __le32  data_in_transferred;
544         __le32  data_out_transferred;
545         u8      data[256];
546 };
547
548 #define PQI_REQUEST_IU_TASK_MANAGEMENT                  0x13
549 #define PQI_REQUEST_IU_RAID_PATH_IO                     0x14
550 #define PQI_REQUEST_IU_AIO_PATH_IO                      0x15
551 #define PQI_REQUEST_IU_AIO_PATH_RAID5_IO                0x18
552 #define PQI_REQUEST_IU_AIO_PATH_RAID6_IO                0x19
553 #define PQI_REQUEST_IU_AIO_PATH_RAID1_IO                0x1A
554 #define PQI_REQUEST_IU_GENERAL_ADMIN                    0x60
555 #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG       0x72
556 #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG          0x73
557 #define PQI_REQUEST_IU_VENDOR_GENERAL                   0x75
558 #define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT         0xf6
559
560 #define PQI_RESPONSE_IU_GENERAL_MANAGEMENT              0x81
561 #define PQI_RESPONSE_IU_TASK_MANAGEMENT                 0x93
562 #define PQI_RESPONSE_IU_GENERAL_ADMIN                   0xe0
563 #define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS            0xf0
564 #define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS             0xf1
565 #define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR              0xf2
566 #define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR               0xf3
567 #define PQI_RESPONSE_IU_AIO_PATH_DISABLED               0xf4
568 #define PQI_RESPONSE_IU_VENDOR_EVENT                    0xf5
569 #define PQI_RESPONSE_IU_VENDOR_GENERAL                  0xf7
570
571 #define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY     0x0
572 #define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ                    0x10
573 #define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ                    0x11
574 #define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ                    0x12
575 #define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ                    0x13
576 #define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY           0x14
577
578 #define PQI_GENERAL_ADMIN_STATUS_SUCCESS        0x0
579
580 #define PQI_IQ_PROPERTY_IS_AIO_QUEUE    0x1
581
582 #define PQI_GENERAL_ADMIN_IU_LENGTH             0x3c
583 #define PQI_PROTOCOL_SOP                        0x0
584
585 #define PQI_DATA_IN_OUT_GOOD                                    0x0
586 #define PQI_DATA_IN_OUT_UNDERFLOW                               0x1
587 #define PQI_DATA_IN_OUT_BUFFER_ERROR                            0x40
588 #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW                         0x41
589 #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA         0x42
590 #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE                  0x43
591 #define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR                       0x60
592 #define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT                 0x61
593 #define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED           0x62
594 #define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED       0x63
595 #define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED                  0x64
596 #define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST                0x65
597 #define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION                      0x66
598 #define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED                 0x67
599 #define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ               0x6F
600 #define PQI_DATA_IN_OUT_ERROR                                   0xf0
601 #define PQI_DATA_IN_OUT_PROTOCOL_ERROR                          0xf1
602 #define PQI_DATA_IN_OUT_HARDWARE_ERROR                          0xf2
603 #define PQI_DATA_IN_OUT_UNSOLICITED_ABORT                       0xf3
604 #define PQI_DATA_IN_OUT_ABORTED                                 0xf4
605 #define PQI_DATA_IN_OUT_TIMEOUT                                 0xf5
606
607 #define CISS_CMD_STATUS_SUCCESS                 0x0
608 #define CISS_CMD_STATUS_TARGET_STATUS           0x1
609 #define CISS_CMD_STATUS_DATA_UNDERRUN           0x2
610 #define CISS_CMD_STATUS_DATA_OVERRUN            0x3
611 #define CISS_CMD_STATUS_INVALID                 0x4
612 #define CISS_CMD_STATUS_PROTOCOL_ERROR          0x5
613 #define CISS_CMD_STATUS_HARDWARE_ERROR          0x6
614 #define CISS_CMD_STATUS_CONNECTION_LOST         0x7
615 #define CISS_CMD_STATUS_ABORTED                 0x8
616 #define CISS_CMD_STATUS_ABORT_FAILED            0x9
617 #define CISS_CMD_STATUS_UNSOLICITED_ABORT       0xa
618 #define CISS_CMD_STATUS_TIMEOUT                 0xb
619 #define CISS_CMD_STATUS_UNABORTABLE             0xc
620 #define CISS_CMD_STATUS_TMF                     0xd
621 #define CISS_CMD_STATUS_AIO_DISABLED            0xe
622
623 #define PQI_CMD_STATUS_ABORTED  CISS_CMD_STATUS_ABORTED
624
625 #define PQI_NUM_EVENT_QUEUE_ELEMENTS    32
626 #define PQI_EVENT_OQ_ELEMENT_LENGTH     sizeof(struct pqi_event_response)
627
628 #define PQI_EVENT_TYPE_HOTPLUG                  0x1
629 #define PQI_EVENT_TYPE_HARDWARE                 0x2
630 #define PQI_EVENT_TYPE_PHYSICAL_DEVICE          0x4
631 #define PQI_EVENT_TYPE_LOGICAL_DEVICE           0x5
632 #define PQI_EVENT_TYPE_OFA                      0xfb
633 #define PQI_EVENT_TYPE_AIO_STATE_CHANGE         0xfd
634 #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE        0xfe
635
636 #pragma pack()
637
638 #define PQI_ERROR_BUFFER_ELEMENT_LENGTH         \
639         sizeof(struct pqi_raid_error_info)
640
641 /* these values are based on our implementation */
642 #define PQI_ADMIN_IQ_NUM_ELEMENTS               8
643 #define PQI_ADMIN_OQ_NUM_ELEMENTS               20
644 #define PQI_ADMIN_IQ_ELEMENT_LENGTH             64
645 #define PQI_ADMIN_OQ_ELEMENT_LENGTH             64
646
647 #define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH       128
648 #define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH       16
649
650 #define PQI_MIN_MSIX_VECTORS            1
651 #define PQI_MAX_MSIX_VECTORS            64
652
653 /* these values are defined by the PQI spec */
654 #define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE        255
655 #define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE  65535
656
657 #define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT       64
658 #define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT      16
659 #define PQI_ADMIN_INDEX_ALIGNMENT               64
660 #define PQI_OPERATIONAL_INDEX_ALIGNMENT         4
661
662 #define PQI_MIN_OPERATIONAL_QUEUE_ID            1
663 #define PQI_MAX_OPERATIONAL_QUEUE_ID            65535
664
665 #define PQI_AIO_SERV_RESPONSE_COMPLETE          0
666 #define PQI_AIO_SERV_RESPONSE_FAILURE           1
667 #define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE      2
668 #define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED     3
669 #define PQI_AIO_SERV_RESPONSE_TMF_REJECTED      4
670 #define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5
671
672 #define PQI_AIO_STATUS_IO_ERROR                 0x1
673 #define PQI_AIO_STATUS_IO_ABORTED               0x2
674 #define PQI_AIO_STATUS_NO_PATH_TO_DEVICE        0x3
675 #define PQI_AIO_STATUS_INVALID_DEVICE           0x4
676 #define PQI_AIO_STATUS_AIO_PATH_DISABLED        0xe
677 #define PQI_AIO_STATUS_UNDERRUN                 0x51
678 #define PQI_AIO_STATUS_OVERRUN                  0x75
679
680 typedef u32 pqi_index_t;
681
682 /* SOP data direction flags */
683 #define SOP_NO_DIRECTION_FLAG   0
684 #define SOP_WRITE_FLAG          1       /* host writes data to Data-Out */
685                                         /* buffer */
686 #define SOP_READ_FLAG           2       /* host receives data from Data-In */
687                                         /* buffer */
688 #define SOP_BIDIRECTIONAL       3       /* data is transferred from the */
689                                         /* Data-Out buffer and data is */
690                                         /* transferred to the Data-In buffer */
691
692 #define SOP_TASK_ATTRIBUTE_SIMPLE               0
693 #define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE        1
694 #define SOP_TASK_ATTRIBUTE_ORDERED              2
695 #define SOP_TASK_ATTRIBUTE_ACA                  4
696
697 #define SOP_TMF_COMPLETE                0x0
698 #define SOP_TMF_REJECTED                0x4
699 #define SOP_TMF_FUNCTION_SUCCEEDED      0x8
700
701 /* additional CDB bytes usage field codes */
702 #define SOP_ADDITIONAL_CDB_BYTES_0      0       /* 16-byte CDB */
703 #define SOP_ADDITIONAL_CDB_BYTES_4      1       /* 20-byte CDB */
704 #define SOP_ADDITIONAL_CDB_BYTES_8      2       /* 24-byte CDB */
705 #define SOP_ADDITIONAL_CDB_BYTES_12     3       /* 28-byte CDB */
706 #define SOP_ADDITIONAL_CDB_BYTES_16     4       /* 32-byte CDB */
707
708 /*
709  * The purpose of this structure is to obtain proper alignment of objects in
710  * an admin queue pair.
711  */
712 struct pqi_admin_queues_aligned {
713         __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
714                 u8      iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
715                                         [PQI_ADMIN_IQ_NUM_ELEMENTS];
716         __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
717                 u8      oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
718                                         [PQI_ADMIN_OQ_NUM_ELEMENTS];
719         __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
720         __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
721 };
722
723 struct pqi_admin_queues {
724         void            *iq_element_array;
725         void            *oq_element_array;
726         pqi_index_t __iomem *iq_ci;
727         pqi_index_t __iomem *oq_pi;
728         dma_addr_t      iq_element_array_bus_addr;
729         dma_addr_t      oq_element_array_bus_addr;
730         dma_addr_t      iq_ci_bus_addr;
731         dma_addr_t      oq_pi_bus_addr;
732         __le32 __iomem  *iq_pi;
733         pqi_index_t     iq_pi_copy;
734         __le32 __iomem  *oq_ci;
735         pqi_index_t     oq_ci_copy;
736         struct task_struct *task;
737         u16             int_msg_num;
738 };
739
740 struct pqi_queue_group {
741         struct pqi_ctrl_info *ctrl_info;        /* backpointer */
742         u16             iq_id[2];
743         u16             oq_id;
744         u16             int_msg_num;
745         void            *iq_element_array[2];
746         void            *oq_element_array;
747         dma_addr_t      iq_element_array_bus_addr[2];
748         dma_addr_t      oq_element_array_bus_addr;
749         __le32 __iomem  *iq_pi[2];
750         pqi_index_t     iq_pi_copy[2];
751         pqi_index_t __iomem *iq_ci[2];
752         pqi_index_t __iomem *oq_pi;
753         dma_addr_t      iq_ci_bus_addr[2];
754         dma_addr_t      oq_pi_bus_addr;
755         __le32 __iomem  *oq_ci;
756         pqi_index_t     oq_ci_copy;
757         spinlock_t      submit_lock[2]; /* protect submission queue */
758         struct list_head request_list[2];
759 };
760
761 struct pqi_event_queue {
762         u16             oq_id;
763         u16             int_msg_num;
764         void            *oq_element_array;
765         pqi_index_t __iomem *oq_pi;
766         dma_addr_t      oq_element_array_bus_addr;
767         dma_addr_t      oq_pi_bus_addr;
768         __le32 __iomem  *oq_ci;
769         pqi_index_t     oq_ci_copy;
770 };
771
772 #define PQI_DEFAULT_QUEUE_GROUP         0
773 #define PQI_MAX_QUEUE_GROUPS            PQI_MAX_MSIX_VECTORS
774
775 struct pqi_encryption_info {
776         u16     data_encryption_key_index;
777         u32     encrypt_tweak_lower;
778         u32     encrypt_tweak_upper;
779 };
780
781 #pragma pack(1)
782
783 #define PQI_CONFIG_TABLE_SIGNATURE      "CFGTABLE"
784 #define PQI_CONFIG_TABLE_MAX_LENGTH     ((u16)~0)
785
786 /* configuration table section IDs */
787 #define PQI_CONFIG_TABLE_ALL_SECTIONS                   (-1)
788 #define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO           0
789 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES      1
790 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA        2
791 #define PQI_CONFIG_TABLE_SECTION_DEBUG                  3
792 #define PQI_CONFIG_TABLE_SECTION_HEARTBEAT              4
793 #define PQI_CONFIG_TABLE_SECTION_SOFT_RESET             5
794
795 struct pqi_config_table {
796         u8      signature[8];           /* "CFGTABLE" */
797         __le32  first_section_offset;   /* offset in bytes from the base */
798                                         /* address of this table to the */
799                                         /* first section */
800 };
801
802 struct pqi_config_table_section_header {
803         __le16  section_id;             /* as defined by the */
804                                         /* PQI_CONFIG_TABLE_SECTION_* */
805                                         /* manifest constants above */
806         __le16  next_section_offset;    /* offset in bytes from base */
807                                         /* address of the table of the */
808                                         /* next section or 0 if last entry */
809 };
810
811 struct pqi_config_table_general_info {
812         struct pqi_config_table_section_header header;
813         __le32  section_length;         /* size of this section in bytes */
814                                         /* including the section header */
815         __le32  max_outstanding_requests;       /* max. outstanding */
816                                                 /* commands supported by */
817                                                 /* the controller */
818         __le32  max_sg_size;            /* max. transfer size of a single */
819                                         /* command */
820         __le32  max_sg_per_request;     /* max. number of scatter-gather */
821                                         /* entries supported in a single */
822                                         /* command */
823 };
824
825 struct pqi_config_table_firmware_features {
826         struct pqi_config_table_section_header header;
827         __le16  num_elements;
828         u8      features_supported[];
829 /*      u8      features_requested_by_host[]; */
830 /*      u8      features_enabled[]; */
831 /* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */
832 /*      __le16  firmware_max_known_feature; */
833 /*      __le16  host_max_known_feature; */
834 };
835
836 #define PQI_FIRMWARE_FEATURE_OFA                                0
837 #define PQI_FIRMWARE_FEATURE_SMP                                1
838 #define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE                  2
839 #define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS                 3
840 #define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS                 4
841 #define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS                 5
842 #define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS                 6
843 #define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS                7
844 #define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS                8
845 #define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS                9
846 #define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS                10
847 #define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE               11
848 #define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN                    12
849 #define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT                    13
850 #define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT                     14
851 #define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME      15
852 #define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN     16
853 #define PQI_FIRMWARE_FEATURE_MAXIMUM                            16
854
855 struct pqi_config_table_debug {
856         struct pqi_config_table_section_header header;
857         __le32  scratchpad;
858 };
859
860 struct pqi_config_table_heartbeat {
861         struct pqi_config_table_section_header header;
862         __le32  heartbeat_counter;
863 };
864
865 struct pqi_config_table_soft_reset {
866         struct pqi_config_table_section_header header;
867         u8 soft_reset_status;
868 };
869
870 #define PQI_SOFT_RESET_INITIATE         0x1
871 #define PQI_SOFT_RESET_ABORT            0x2
872
873 enum pqi_soft_reset_status {
874         RESET_INITIATE_FIRMWARE,
875         RESET_INITIATE_DRIVER,
876         RESET_ABORT,
877         RESET_NORESPONSE,
878         RESET_TIMEDOUT
879 };
880
881 union pqi_reset_register {
882         struct {
883                 u32     reset_type : 3;
884                 u32     reserved : 2;
885                 u32     reset_action : 3;
886                 u32     hold_in_pd1 : 1;
887                 u32     reserved2 : 23;
888         } bits;
889         u32     all_bits;
890 };
891
892 #define PQI_RESET_ACTION_RESET          0x1
893
894 #define PQI_RESET_TYPE_NO_RESET         0x0
895 #define PQI_RESET_TYPE_SOFT_RESET       0x1
896 #define PQI_RESET_TYPE_FIRM_RESET       0x2
897 #define PQI_RESET_TYPE_HARD_RESET       0x3
898
899 #define PQI_RESET_ACTION_COMPLETED      0x2
900
901 #define PQI_RESET_POLL_INTERVAL_MSECS   100
902
903 #define PQI_MAX_OUTSTANDING_REQUESTS            ((u32)~0)
904 #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP      32
905 #define PQI_MAX_TRANSFER_SIZE                   (1024U * 1024U)
906 #define PQI_MAX_TRANSFER_SIZE_KDUMP             (512 * 1024U)
907
908 #define RAID_MAP_MAX_ENTRIES            1024
909
910 #define PQI_PHYSICAL_DEVICE_BUS         0
911 #define PQI_RAID_VOLUME_BUS             1
912 #define PQI_HBA_BUS                     2
913 #define PQI_EXTERNAL_RAID_VOLUME_BUS    3
914 #define PQI_MAX_BUS                     PQI_EXTERNAL_RAID_VOLUME_BUS
915 #define PQI_VSEP_CISS_BTL               379
916
917 struct report_lun_header {
918         __be32  list_length;
919         u8      flags;
920         u8      reserved[3];
921 };
922
923 /* for flags field of struct report_lun_header */
924 #define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID      (1 << 0)
925 #define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH        (1 << 5)
926 #define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX     (1 << 6)
927
928 #define CISS_REPORT_PHYS_FLAG_OTHER             (1 << 1)
929
930 struct report_log_lun_extended_entry {
931         u8      lunid[8];
932         u8      volume_id[16];
933 };
934
935 struct report_log_lun_extended {
936         struct report_lun_header header;
937         struct report_log_lun_extended_entry lun_entries[1];
938 };
939
940 struct report_phys_lun_extended_entry {
941         u8      lunid[8];
942         __be64  wwid;
943         u8      device_type;
944         u8      device_flags;
945         u8      lun_count;      /* number of LUNs in a multi-LUN device */
946         u8      redundant_paths;
947         u32     aio_handle;
948 };
949
950 /* for device_flags field of struct report_phys_lun_extended_entry */
951 #define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED   0x8
952
953 struct report_phys_lun_extended {
954         struct report_lun_header header;
955         struct report_phys_lun_extended_entry lun_entries[1];
956 };
957
958 struct raid_map_disk_data {
959         u32     aio_handle;
960         u8      xor_mult[2];
961         u8      reserved[2];
962 };
963
964 /* for flags field of RAID map */
965 #define RAID_MAP_ENCRYPTION_ENABLED     0x1
966
967 struct raid_map {
968         __le32  structure_size;         /* size of entire structure in bytes */
969         __le32  volume_blk_size;        /* bytes / block in the volume */
970         __le64  volume_blk_cnt;         /* logical blocks on the volume */
971         u8      phys_blk_shift;         /* shift factor to convert between */
972                                         /* units of logical blocks and */
973                                         /* physical disk blocks */
974         u8      parity_rotation_shift;  /* shift factor to convert between */
975                                         /* units of logical stripes and */
976                                         /* physical stripes */
977         __le16  strip_size;             /* blocks used on each disk / stripe */
978         __le64  disk_starting_blk;      /* first disk block used in volume */
979         __le64  disk_blk_cnt;           /* disk blocks used by volume / disk */
980         __le16  data_disks_per_row;     /* data disk entries / row in the map */
981         __le16  metadata_disks_per_row; /* mirror/parity disk entries / row */
982                                         /* in the map */
983         __le16  row_cnt;                /* rows in each layout map */
984         __le16  layout_map_count;       /* layout maps (1 map per */
985                                         /* mirror parity group) */
986         __le16  flags;
987         __le16  data_encryption_key_index;
988         u8      reserved[16];
989         struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
990 };
991
992 #pragma pack()
993
994 struct pqi_scsi_dev_raid_map_data {
995         bool    is_write;
996         u8      raid_level;
997         u32     map_index;
998         u64     first_block;
999         u64     last_block;
1000         u32     data_length;
1001         u32     block_cnt;
1002         u32     blocks_per_row;
1003         u64     first_row;
1004         u64     last_row;
1005         u32     first_row_offset;
1006         u32     last_row_offset;
1007         u32     first_column;
1008         u32     last_column;
1009         u64     r5or6_first_row;
1010         u64     r5or6_last_row;
1011         u32     r5or6_first_row_offset;
1012         u32     r5or6_last_row_offset;
1013         u32     r5or6_first_column;
1014         u32     r5or6_last_column;
1015         u16     data_disks_per_row;
1016         u32     total_disks_per_row;
1017         u16     layout_map_count;
1018         u32     stripesize;
1019         u16     strip_size;
1020         u32     first_group;
1021         u32     last_group;
1022         u32     map_row;
1023         u32     aio_handle;
1024         u64     disk_block;
1025         u32     disk_block_cnt;
1026         u8      cdb[16];
1027         u8      cdb_length;
1028
1029         /* RAID 1 specific */
1030 #define NUM_RAID1_MAP_ENTRIES   3
1031         u32     num_it_nexus_entries;
1032         u32     it_nexus[NUM_RAID1_MAP_ENTRIES];
1033
1034         /* RAID 5 / RAID 6 specific */
1035         u32     p_parity_it_nexus;      /* aio_handle */
1036         u32     q_parity_it_nexus;      /* aio_handle */
1037         u8      xor_mult;
1038         u64     row;
1039         u64     stripe_lba;
1040         u32     p_index;
1041         u32     q_index;
1042 };
1043
1044 #define RAID_CTLR_LUNID         "\0\0\0\0\0\0\0\0"
1045
1046 #define NUM_STREAMS_PER_LUN     8
1047
1048 struct pqi_stream_data {
1049         u64     next_lba;
1050         u32     last_accessed;
1051 };
1052
1053 struct pqi_scsi_dev {
1054         int     devtype;                /* as reported by INQUIRY commmand */
1055         u8      device_type;            /* as reported by */
1056                                         /* BMIC_IDENTIFY_PHYSICAL_DEVICE */
1057                                         /* only valid for devtype = TYPE_DISK */
1058         int     bus;
1059         int     target;
1060         int     lun;
1061         u8      scsi3addr[8];
1062         __be64  wwid;
1063         u8      volume_id[16];
1064         u8      is_physical_device : 1;
1065         u8      is_external_raid_device : 1;
1066         u8      is_expander_smp_device : 1;
1067         u8      target_lun_valid : 1;
1068         u8      device_gone : 1;
1069         u8      new_device : 1;
1070         u8      keep_device : 1;
1071         u8      volume_offline : 1;
1072         u8      rescan : 1;
1073         bool    aio_enabled;            /* only valid for physical disks */
1074         bool    in_remove;
1075         bool    device_offline;
1076         u8      vendor[8];              /* bytes 8-15 of inquiry data */
1077         u8      model[16];              /* bytes 16-31 of inquiry data */
1078         u64     sas_address;
1079         u8      raid_level;
1080         u16     queue_depth;            /* max. queue_depth for this device */
1081         u16     advertised_queue_depth;
1082         u32     aio_handle;
1083         u8      volume_status;
1084         u8      active_path_index;
1085         u8      path_map;
1086         u8      bay;
1087         u8      box_index;
1088         u8      phys_box_on_bus;
1089         u8      phy_connected_dev_type;
1090         u8      box[8];
1091         u16     phys_connector[8];
1092         u8      phy_id;
1093         bool    raid_bypass_configured; /* RAID bypass configured */
1094         bool    raid_bypass_enabled;    /* RAID bypass enabled */
1095         u32     next_bypass_group;
1096         struct raid_map *raid_map;      /* RAID bypass map */
1097         u32     max_transfer_encrypted;
1098
1099         struct pqi_sas_port *sas_port;
1100         struct scsi_device *sdev;
1101
1102         struct list_head scsi_device_list_entry;
1103         struct list_head new_device_list_entry;
1104         struct list_head add_list_entry;
1105         struct list_head delete_list_entry;
1106
1107         struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
1108         atomic_t scsi_cmds_outstanding;
1109         atomic_t raid_bypass_cnt;
1110         u8      page_83_identifier[16];
1111 };
1112
1113 /* VPD inquiry pages */
1114 #define CISS_VPD_LV_DEVICE_GEOMETRY     0xc1    /* vendor-specific page */
1115 #define CISS_VPD_LV_BYPASS_STATUS       0xc2    /* vendor-specific page */
1116 #define CISS_VPD_LV_STATUS              0xc3    /* vendor-specific page */
1117
1118 #define VPD_PAGE        (1 << 8)
1119
1120 #pragma pack(1)
1121
1122 /* structure for CISS_VPD_LV_STATUS */
1123 struct ciss_vpd_logical_volume_status {
1124         u8      peripheral_info;
1125         u8      page_code;
1126         u8      reserved;
1127         u8      page_length;
1128         u8      volume_status;
1129         u8      reserved2[3];
1130         __be32  flags;
1131 };
1132
1133 #pragma pack()
1134
1135 /* constants for volume_status field of ciss_vpd_logical_volume_status */
1136 #define CISS_LV_OK                                      0
1137 #define CISS_LV_FAILED                                  1
1138 #define CISS_LV_NOT_CONFIGURED                          2
1139 #define CISS_LV_DEGRADED                                3
1140 #define CISS_LV_READY_FOR_RECOVERY                      4
1141 #define CISS_LV_UNDERGOING_RECOVERY                     5
1142 #define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED           6
1143 #define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM       7
1144 #define CISS_LV_HARDWARE_OVERHEATING                    8
1145 #define CISS_LV_HARDWARE_HAS_OVERHEATED                 9
1146 #define CISS_LV_UNDERGOING_EXPANSION                    10
1147 #define CISS_LV_NOT_AVAILABLE                           11
1148 #define CISS_LV_QUEUED_FOR_EXPANSION                    12
1149 #define CISS_LV_DISABLED_SCSI_ID_CONFLICT               13
1150 #define CISS_LV_EJECTED                                 14
1151 #define CISS_LV_UNDERGOING_ERASE                        15
1152 /* state 16 not used */
1153 #define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD      17
1154 #define CISS_LV_UNDERGOING_RPI                          18
1155 #define CISS_LV_PENDING_RPI                             19
1156 #define CISS_LV_ENCRYPTED_NO_KEY                        20
1157 /* state 21 not used */
1158 #define CISS_LV_UNDERGOING_ENCRYPTION                   22
1159 #define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING          23
1160 #define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER   24
1161 #define CISS_LV_PENDING_ENCRYPTION                      25
1162 #define CISS_LV_PENDING_ENCRYPTION_REKEYING             26
1163 #define CISS_LV_NOT_SUPPORTED                           27
1164 #define CISS_LV_STATUS_UNAVAILABLE                      255
1165
1166 /* constants for flags field of ciss_vpd_logical_volume_status */
1167 #define CISS_LV_FLAGS_NO_HOST_IO        0x1     /* volume not available for */
1168                                                 /* host I/O */
1169
1170 /* for SAS hosts and SAS expanders */
1171 struct pqi_sas_node {
1172         struct device *parent_dev;
1173         struct list_head port_list_head;
1174 };
1175
1176 struct pqi_sas_port {
1177         struct list_head port_list_entry;
1178         u64     sas_address;
1179         struct pqi_scsi_dev *device;
1180         struct sas_port *port;
1181         int     next_phy_index;
1182         struct list_head phy_list_head;
1183         struct pqi_sas_node *parent_node;
1184         struct sas_rphy *rphy;
1185 };
1186
1187 struct pqi_sas_phy {
1188         struct list_head phy_list_entry;
1189         struct sas_phy *phy;
1190         struct pqi_sas_port *parent_port;
1191         bool    added_to_port;
1192 };
1193
1194 struct pqi_io_request {
1195         atomic_t        refcount;
1196         u16             index;
1197         void (*io_complete_callback)(struct pqi_io_request *io_request,
1198                 void *context);
1199         void            *context;
1200         u8              raid_bypass : 1;
1201         int             status;
1202         struct pqi_queue_group *queue_group;
1203         struct scsi_cmnd *scmd;
1204         void            *error_info;
1205         struct pqi_sg_descriptor *sg_chain_buffer;
1206         dma_addr_t      sg_chain_buffer_dma_handle;
1207         void            *iu;
1208         struct list_head request_list_entry;
1209 };
1210
1211 #define PQI_NUM_SUPPORTED_EVENTS        7
1212
1213 struct pqi_event {
1214         bool    pending;
1215         u8      event_type;
1216         u16     event_id;
1217         u32     additional_event_id;
1218 };
1219
1220 #define PQI_RESERVED_IO_SLOTS_LUN_RESET                 1
1221 #define PQI_RESERVED_IO_SLOTS_EVENT_ACK                 PQI_NUM_SUPPORTED_EVENTS
1222 #define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS      3
1223 #define PQI_RESERVED_IO_SLOTS                           \
1224         (PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
1225         PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
1226
1227 #define PQI_CTRL_PRODUCT_ID_GEN1        0
1228 #define PQI_CTRL_PRODUCT_ID_GEN2        7
1229 #define PQI_CTRL_PRODUCT_REVISION_A     0
1230 #define PQI_CTRL_PRODUCT_REVISION_B     1
1231
1232 struct pqi_ctrl_info {
1233         unsigned int    ctrl_id;
1234         struct pci_dev  *pci_dev;
1235         char            firmware_version[32];
1236         char            serial_number[17];
1237         char            model[17];
1238         char            vendor[9];
1239         u8              product_id;
1240         u8              product_revision;
1241         void __iomem    *iomem_base;
1242         struct pqi_ctrl_registers __iomem *registers;
1243         struct pqi_device_registers __iomem *pqi_registers;
1244         u32             max_sg_entries;
1245         u32             config_table_offset;
1246         u32             config_table_length;
1247         u16             max_inbound_queues;
1248         u16             max_elements_per_iq;
1249         u16             max_iq_element_length;
1250         u16             max_outbound_queues;
1251         u16             max_elements_per_oq;
1252         u16             max_oq_element_length;
1253         u32             max_transfer_size;
1254         u32             max_outstanding_requests;
1255         u32             max_io_slots;
1256         unsigned int    scsi_ml_can_queue;
1257         unsigned short  sg_tablesize;
1258         unsigned int    max_sectors;
1259         u32             error_buffer_length;
1260         void            *error_buffer;
1261         dma_addr_t      error_buffer_dma_handle;
1262         size_t          sg_chain_buffer_length;
1263         unsigned int    num_queue_groups;
1264         u16             max_hw_queue_index;
1265         u16             num_elements_per_iq;
1266         u16             num_elements_per_oq;
1267         u16             max_inbound_iu_length_per_firmware;
1268         u16             max_inbound_iu_length;
1269         unsigned int    max_sg_per_iu;
1270         unsigned int    max_sg_per_r56_iu;
1271         void            *admin_queue_memory_base;
1272         u32             admin_queue_memory_length;
1273         dma_addr_t      admin_queue_memory_base_dma_handle;
1274         void            *queue_memory_base;
1275         u32             queue_memory_length;
1276         dma_addr_t      queue_memory_base_dma_handle;
1277         struct pqi_admin_queues admin_queues;
1278         struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
1279         struct pqi_event_queue event_queue;
1280         enum pqi_irq_mode irq_mode;
1281         int             max_msix_vectors;
1282         int             num_msix_vectors_enabled;
1283         int             num_msix_vectors_initialized;
1284         int             event_irq;
1285         struct Scsi_Host *scsi_host;
1286
1287         struct mutex    scan_mutex;
1288         struct mutex    lun_reset_mutex;
1289         bool            controller_online;
1290         bool            block_requests;
1291         bool            scan_blocked;
1292         u8              inbound_spanning_supported : 1;
1293         u8              outbound_spanning_supported : 1;
1294         u8              pqi_mode_enabled : 1;
1295         u8              pqi_reset_quiesce_supported : 1;
1296         u8              soft_reset_handshake_supported : 1;
1297         u8              raid_iu_timeout_supported : 1;
1298         u8              tmf_iu_timeout_supported : 1;
1299         u8              unique_wwid_in_report_phys_lun_supported : 1;
1300         u8              enable_r1_writes : 1;
1301         u8              enable_r5_writes : 1;
1302         u8              enable_r6_writes : 1;
1303         u8              lv_drive_type_mix_valid : 1;
1304         u8              enable_stream_detection : 1;
1305
1306         u8              ciss_report_log_flags;
1307         u32             max_transfer_encrypted_sas_sata;
1308         u32             max_transfer_encrypted_nvme;
1309         u32             max_write_raid_5_6;
1310         u32             max_write_raid_1_10_2drive;
1311         u32             max_write_raid_1_10_3drive;
1312
1313         struct list_head scsi_device_list;
1314         spinlock_t      scsi_device_list_lock;
1315
1316         struct delayed_work rescan_work;
1317         struct delayed_work update_time_work;
1318
1319         struct pqi_sas_node *sas_host;
1320         u64             sas_address;
1321
1322         struct pqi_io_request *io_request_pool;
1323         u16             next_io_request_slot;
1324
1325         struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS];
1326         struct work_struct event_work;
1327
1328         atomic_t        num_interrupts;
1329         int             previous_num_interrupts;
1330         u32             previous_heartbeat_count;
1331         __le32 __iomem  *heartbeat_counter;
1332         u8 __iomem      *soft_reset_status;
1333         struct timer_list heartbeat_timer;
1334         struct work_struct ctrl_offline_work;
1335
1336         struct semaphore sync_request_sem;
1337         atomic_t        num_busy_threads;
1338         atomic_t        num_blocked_threads;
1339         wait_queue_head_t block_requests_wait;
1340
1341         struct mutex    ofa_mutex;
1342         struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
1343         dma_addr_t      pqi_ofa_mem_dma_handle;
1344         void            **pqi_ofa_chunk_virt_addr;
1345         struct work_struct ofa_memory_alloc_work;
1346         struct work_struct ofa_quiesce_work;
1347         u32             ofa_bytes_requested;
1348         u16             ofa_cancel_reason;
1349 };
1350
1351 enum pqi_ctrl_mode {
1352         SIS_MODE = 0,
1353         PQI_MODE
1354 };
1355
1356 /*
1357  * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
1358  */
1359 #define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH       27
1360
1361 /* CISS commands */
1362 #define CISS_READ               0xc0
1363 #define CISS_REPORT_LOG         0xc2    /* Report Logical LUNs */
1364 #define CISS_REPORT_PHYS        0xc3    /* Report Physical LUNs */
1365 #define CISS_GET_RAID_MAP       0xc8
1366
1367 /* BMIC commands */
1368 #define BMIC_IDENTIFY_CONTROLLER                0x11
1369 #define BMIC_IDENTIFY_PHYSICAL_DEVICE           0x15
1370 #define BMIC_READ                               0x26
1371 #define BMIC_WRITE                              0x27
1372 #define BMIC_SENSE_FEATURE                      0x61
1373 #define BMIC_SENSE_CONTROLLER_PARAMETERS        0x64
1374 #define BMIC_SENSE_SUBSYSTEM_INFORMATION        0x66
1375 #define BMIC_CSMI_PASSTHRU                      0x68
1376 #define BMIC_WRITE_HOST_WELLNESS                0xa5
1377 #define BMIC_FLUSH_CACHE                        0xc2
1378 #define BMIC_SET_DIAG_OPTIONS                   0xf4
1379 #define BMIC_SENSE_DIAG_OPTIONS                 0xf5
1380
1381 #define CSMI_CC_SAS_SMP_PASSTHRU                0x17
1382
1383 #define SA_FLUSH_CACHE                          0x1
1384
1385 #define MASKED_DEVICE(lunid)                    ((lunid)[3] & 0xc0)
1386 #define CISS_GET_LEVEL_2_BUS(lunid)             ((lunid)[7] & 0x3f)
1387 #define CISS_GET_LEVEL_2_TARGET(lunid)          ((lunid)[6])
1388 #define CISS_GET_DRIVE_NUMBER(lunid)            \
1389         (((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
1390         CISS_GET_LEVEL_2_TARGET((lunid)))
1391
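/*
 * Illustrative sketch (not part of the driver): applying the CISS lunid
 * macros above to an 8-byte LUN address. Treating MASKED_DEVICE() as "no
 * decodable bus/target information" is an assumption here, and the helper
 * name is hypothetical.
 */
static inline int pqi_example_lunid_to_drive_number(const u8 *lunid)
{
        if (MASKED_DEVICE(lunid))
                return -1;      /* assumed: masked devices carry no drive number */

        return CISS_GET_DRIVE_NUMBER(lunid);
}
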
1392 #define LV_GET_DRIVE_TYPE_MIX(lunid)            ((lunid)[6])
1393
1394 #define LV_DRIVE_TYPE_MIX_UNKNOWN               0
1395 #define LV_DRIVE_TYPE_MIX_NO_RESTRICTION        1
1396 #define LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY          2
1397 #define LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY         3
1398 #define LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY  4
1399 #define LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY          5
1400 #define LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY         6
1401 #define LV_DRIVE_TYPE_MIX_SAS_ONLY              7
1402 #define LV_DRIVE_TYPE_MIX_SATA_ONLY             8
1403 #define LV_DRIVE_TYPE_MIX_NVME_ONLY             9
1404
1405 #define NO_TIMEOUT              ((unsigned long) -1)
1406
1407 #pragma pack(1)
1408
1409 struct bmic_identify_controller {
1410         u8      configured_logical_drive_count;
1411         __le32  configuration_signature;
1412         u8      firmware_version_short[4];
1413         u8      reserved[145];
1414         __le16  extended_logical_unit_count;
1415         u8      reserved1[34];
1416         __le16  firmware_build_number;
1417         u8      reserved2[8];
1418         u8      vendor_id[8];
1419         u8      product_id[16];
1420         u8      reserved3[62];
1421         __le32  extra_controller_flags;
1422         u8      reserved4[2];
1423         u8      controller_mode;
1424         u8      spare_part_number[32];
1425         u8      firmware_version_long[32];
1426 };
1427
1428 /* constants for extra_controller_flags field of bmic_identify_controller */
1429 #define BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED     0x20000000
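
/*
 * Illustrative sketch (not part of the driver): choosing between the short
 * and long firmware version strings based on the extra_controller_flags bit
 * defined above. The helper name and the output-buffer contract are
 * hypothetical.
 */
static inline void pqi_example_copy_fw_version(
        struct bmic_identify_controller *id, char *buf, size_t buf_size)
{
        const u8 *src;
        size_t len;

        if (!buf_size)
                return;

        if (le32_to_cpu(id->extra_controller_flags) &
                        BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
                src = id->firmware_version_long;
                len = sizeof(id->firmware_version_long);
        } else {
                src = id->firmware_version_short;
                len = sizeof(id->firmware_version_short);
        }

        len = min(len, buf_size - 1);
        memcpy(buf, src, len);
        buf[len] = '\0';
}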
1430
1431 struct bmic_sense_subsystem_info {
1432         u8      reserved[44];
1433         u8      ctrl_serial_number[16];
1434 };
1435
1436 /* constants for the device_type field of bmic_identify_physical_device */
1437 #define SA_DEVICE_TYPE_SATA             0x1
1438 #define SA_DEVICE_TYPE_SAS              0x2
1439 #define SA_DEVICE_TYPE_EXPANDER_SMP     0x5
1440 #define SA_DEVICE_TYPE_SES              0x6
1441 #define SA_DEVICE_TYPE_CONTROLLER       0x7
1442 #define SA_DEVICE_TYPE_NVME             0x9
1443
1444 struct bmic_identify_physical_device {
1445         u8      scsi_bus;               /* SCSI Bus number on controller */
1446         u8      scsi_id;                /* SCSI ID on this bus */
1447         __le16  block_size;             /* sector size in bytes */
1448         __le32  total_blocks;           /* number of sectors on drive */
1449         __le32  reserved_blocks;        /* controller reserved (RIS) */
1450         u8      model[40];              /* Physical Drive Model */
1451         u8      serial_number[40];      /* Drive Serial Number */
1452         u8      firmware_revision[8];   /* drive firmware revision */
1453         u8      scsi_inquiry_bits;      /* inquiry byte 7 bits */
1454         u8      compaq_drive_stamp;     /* 0 means drive not stamped */
1455         u8      last_failure_reason;
1456         u8      flags;
1457         u8      more_flags;
1458         u8      scsi_lun;               /* SCSI LUN for phys drive */
1459         u8      yet_more_flags;
1460         u8      even_more_flags;
1461         __le32  spi_speed_rules;
1462         u8      phys_connector[2];      /* connector number on controller */
1463         u8      phys_box_on_bus;        /* phys enclosure this drive resides in */
1464         u8      phys_bay_in_box;        /* phys drive bay this drive resides in */
1465         __le32  rpm;                    /* drive rotational speed in RPM */
1466         u8      device_type;            /* type of drive */
1467         u8      sata_version;           /* only valid when device_type = */
1468                                         /* SA_DEVICE_TYPE_SATA */
1469         __le64  big_total_block_count;
1470         __le64  ris_starting_lba;
1471         __le32  ris_size;
1472         u8      wwid[20];
1473         u8      controller_phy_map[32];
1474         __le16  phy_count;
1475         u8      phy_connected_dev_type[256];
1476         u8      phy_to_drive_bay_num[256];
1477         __le16  phy_to_attached_dev_index[256];
1478         u8      box_index;
1479         u8      reserved;
1480         __le16  extra_physical_drive_flags;
1481         u8      negotiated_link_rate[256];
1482         u8      phy_to_phy_map[256];
1483         u8      redundant_path_present_map;
1484         u8      redundant_path_failure_map;
1485         u8      active_path_number;
1486         __le16  alternate_paths_phys_connector[8];
1487         u8      alternate_paths_phys_box_on_port[8];
1488         u8      multi_lun_device_lun_count;
1489         u8      minimum_good_fw_revision[8];
1490         u8      unique_inquiry_bytes[20];
1491         u8      current_temperature_degrees;
1492         u8      temperature_threshold_degrees;
1493         u8      max_temperature_degrees;
1494         u8      logical_blocks_per_phys_block_exp;
1495         __le16  current_queue_depth_limit;
1496         u8      switch_name[10];
1497         __le16  switch_port;
1498         u8      alternate_paths_switch_name[40];
1499         u8      alternate_paths_switch_port[8];
1500         __le16  power_on_hours;
1501         __le16  percent_endurance_used;
1502         u8      drive_authentication;
1503         u8      smart_carrier_authentication;
1504         u8      smart_carrier_app_fw_version;
1505         u8      smart_carrier_bootloader_fw_version;
1506         u8      sanitize_flags;
1507         u8      encryption_key_flags;
1508         u8      encryption_key_name[64];
1509         __le32  misc_drive_flags;
1510         __le16  dek_index;
1511         __le16  hba_drive_encryption_flags;
1512         __le16  max_overwrite_time;
1513         __le16  max_block_erase_time;
1514         __le16  max_crypto_erase_time;
1515         u8      connector_info[5];
1516         u8      connector_name[8][8];
1517         u8      page_83_identifier[16];
1518         u8      maximum_link_rate[256];
1519         u8      negotiated_physical_link_rate[256];
1520         u8      box_connector_name[8];
1521         u8      padding_to_multiple_of_512[9];
1522 };
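
/*
 * Illustrative sketch (not part of the driver): deriving a raw capacity in
 * bytes from an IDENTIFY PHYSICAL DEVICE response. Falling back from the
 * 64-bit big_total_block_count to the 32-bit total_blocks is an assumption
 * about how firmware populates the two fields; the helper name is
 * hypothetical.
 */
static inline u64 pqi_example_phys_device_capacity_bytes(
        struct bmic_identify_physical_device *id_phys)
{
        u64 block_count = le64_to_cpu(id_phys->big_total_block_count);

        if (!block_count)
                block_count = le32_to_cpu(id_phys->total_blocks);

        return block_count * le16_to_cpu(id_phys->block_size);
}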
1523
1524 #define BMIC_SENSE_FEATURE_IO_PAGE              0x8
1525 #define BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE  0x2
1526
1527 struct bmic_sense_feature_buffer_header {
1528         u8      page_code;
1529         u8      subpage_code;
1530         __le16  buffer_length;
1531 };
1532
1533 struct bmic_sense_feature_page_header {
1534         u8      page_code;
1535         u8      subpage_code;
1536         __le16  page_length;
1537 };
1538
1539 struct bmic_sense_feature_io_page_aio_subpage {
1540         struct bmic_sense_feature_page_header header;
1541         u8      firmware_read_support;
1542         u8      driver_read_support;
1543         u8      firmware_write_support;
1544         u8      driver_write_support;
1545         __le16  max_transfer_encrypted_sas_sata;
1546         __le16  max_transfer_encrypted_nvme;
1547         __le16  max_write_raid_5_6;
1548         __le16  max_write_raid_1_10_2drive;
1549         __le16  max_write_raid_1_10_3drive;
1550 };
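
/*
 * Illustrative sketch (not part of the driver): minimal validation of a
 * BMIC_SENSE_FEATURE response before trusting the AIO subpage limits. That
 * the subpage directly follows the buffer header, and that response_length
 * covers the whole response, are assumptions; the helper name is
 * hypothetical.
 */
static inline bool pqi_example_aio_subpage_valid(const void *response,
        size_t response_length)
{
        const struct bmic_sense_feature_buffer_header *header = response;
        const struct bmic_sense_feature_io_page_aio_subpage *subpage;

        if (response_length < sizeof(*header) + sizeof(*subpage))
                return false;

        subpage = (const void *)(header + 1);

        return header->page_code == BMIC_SENSE_FEATURE_IO_PAGE &&
                subpage->header.page_code == BMIC_SENSE_FEATURE_IO_PAGE &&
                subpage->header.subpage_code ==
                        BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
}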
1551
1552 struct bmic_smp_request {
1553         u8      frame_type;
1554         u8      function;
1555         u8      allocated_response_length;
1556         u8      request_length;
1557         u8      additional_request_bytes[1016];
1558 };
1559
1560 struct bmic_smp_response {
1561         u8      frame_type;
1562         u8      function;
1563         u8      function_result;
1564         u8      response_length;
1565         u8      additional_response_bytes[1016];
1566 };
1567
1568 struct bmic_csmi_ioctl_header {
1569         __le32  header_length;
1570         u8      signature[8];
1571         __le32  timeout;
1572         __le32  control_code;
1573         __le32  return_code;
1574         __le32  length;
1575 };
1576
1577 struct bmic_csmi_smp_passthru {
1578         u8      phy_identifier;
1579         u8      port_identifier;
1580         u8      connection_rate;
1581         u8      reserved;
1582         __be64  destination_sas_address;
1583         __le32  request_length;
1584         struct bmic_smp_request request;
1585         u8      connection_status;
1586         u8      reserved1[3];
1587         __le32  response_length;
1588         struct bmic_smp_response response;
1589 };
1590
1591 struct bmic_csmi_smp_passthru_buffer {
1592         struct bmic_csmi_ioctl_header ioctl_header;
1593         struct bmic_csmi_smp_passthru parameters;
1594 };
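
/*
 * Illustrative sketch (not part of the driver): filling in the wrapper
 * fields of a CSMI SMP passthru buffer before handing it to
 * pqi_csmi_smp_passthru(). The SMP request frame itself and the CSMI
 * signature/control-code values come from the SAS and CSMI specifications
 * and are intentionally left out; the helper name is hypothetical.
 */
static inline void pqi_example_init_smp_passthru(
        struct bmic_csmi_smp_passthru_buffer *buffer,
        u64 dest_sas_address, u32 smp_request_length)
{
        memset(buffer, 0, sizeof(*buffer));
        buffer->ioctl_header.header_length =
                cpu_to_le32(sizeof(buffer->ioctl_header));
        buffer->parameters.destination_sas_address =
                cpu_to_be64(dest_sas_address);
        buffer->parameters.request_length = cpu_to_le32(smp_request_length);
}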
1595
1596 struct bmic_flush_cache {
1597         u8      disable_flag;
1598         u8      system_power_action;
1599         u8      ndu_flush;
1600         u8      shutdown_event;
1601         u8      reserved[28];
1602 };
1603
1604 /* for shutdown_event member of struct bmic_flush_cache */
1605 enum bmic_flush_cache_shutdown_event {
1606         NONE_CACHE_FLUSH_ONLY = 0,
1607         SHUTDOWN = 1,
1608         HIBERNATE = 2,
1609         SUSPEND = 3,
1610         RESTART = 4
1611 };
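
/*
 * Illustrative sketch (not part of the driver): preparing the payload for a
 * SA_FLUSH_CACHE request ahead of a clean shutdown. Only fields defined
 * above are set; the helper name is hypothetical.
 */
static inline void pqi_example_prep_flush_cache(
        struct bmic_flush_cache *flush_cache,
        enum bmic_flush_cache_shutdown_event shutdown_event)
{
        memset(flush_cache, 0, sizeof(*flush_cache));
        flush_cache->shutdown_event = shutdown_event;
}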
1612
1613 struct bmic_diag_options {
1614         __le32 options;
1615 };
1616
1617 #pragma pack()
1618
1619 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
1620 {
1621         void *hostdata = shost_priv(shost);
1622
1623         return *((struct pqi_ctrl_info **)hostdata);
1624 }
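
/*
 * Illustrative sketch (not part of the driver): shost_to_hba() above assumes
 * the Scsi_Host private area holds a single pointer to the owning
 * pqi_ctrl_info, i.e. the host was allocated with room for one pointer and
 * that pointer was stored during host registration. The helper name is
 * hypothetical; the real setup lives in the driver's .c files.
 */
static inline void pqi_example_set_hba(struct Scsi_Host *shost,
        struct pqi_ctrl_info *ctrl_info)
{
        *((struct pqi_ctrl_info **)shost_priv(shost)) = ctrl_info;
}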
1625
1626 void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
1627         struct sas_rphy *rphy);
1628
1629 int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
1630 void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
1631 int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
1632         struct pqi_scsi_dev *device);
1633 void pqi_remove_sas_device(struct pqi_scsi_dev *device);
1634 struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
1635         struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
1636 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
1637 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
1638         struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
1639         struct pqi_raid_error_info *error_info);
1640
1641 extern struct sas_function_template pqi_sas_transport_functions;
1642
1643 #endif /* _SMARTPQI_H */