// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.12-055"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		12
#define DRIVER_REVISION		55

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_msecs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};
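
/*
 * Note: the lockup_action module parameter (a string) is matched against the
 * name field of this table at module load time to select the corresponding
 * enum value; an unrecognized string leaves pqi_lockup_action at NONE.
 */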

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0	/* just a RAID level */
#define SA_RAID_4		1	/* also used for RAID 40 */
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}
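
/*
 * The driver scratch register lives on the controller, so its contents
 * survive a host reboot or kexec while the controller stays powered. The
 * mode and firmware-triage bits saved here let the driver discover on the
 * next probe which state the controller was last left in.
 */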

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}
328 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
330 scsi_unblock_requests(ctrl_info->scsi_host);
333 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
335 atomic_inc(&ctrl_info->num_busy_threads);
338 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
340 atomic_dec(&ctrl_info->num_busy_threads);
343 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
345 return ctrl_info->block_requests;
348 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
350 ctrl_info->block_requests = true;
353 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
355 ctrl_info->block_requests = false;
356 wake_up_all(&ctrl_info->block_requests_wait);
359 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
361 if (!pqi_ctrl_blocked(ctrl_info))
364 atomic_inc(&ctrl_info->num_blocked_threads);
365 wait_event(ctrl_info->block_requests_wait,
366 !pqi_ctrl_blocked(ctrl_info));
367 atomic_dec(&ctrl_info->num_blocked_threads);
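
/*
 * Together, num_busy_threads and num_blocked_threads implement a simple
 * quiesce handshake: every code path that may issue I/O brackets itself
 * with pqi_ctrl_busy()/pqi_ctrl_unbusy(), and a busy thread that parks in
 * pqi_wait_if_ctrl_blocked() is additionally counted as blocked. The
 * driver is quiesced once the two counters are equal, which is exactly
 * what pqi_ctrl_wait_until_quiesced() below polls for.
 */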

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}
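
/*
 * ofa_mutex is held for the entire duration of an Online Firmware
 * Activation, so "is the mutex locked?" doubles as "is OFA in progress?",
 * and the plain lock/unlock pair in pqi_wait_until_ofa_finished() blocks
 * the caller until the activation completes.
 */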

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case TEST_UNIT_READY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = TEST_UNIT_READY;
		break;
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}
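
/*
 * Typical usage (this is exactly the pattern wrapped up by
 * pqi_send_scsi_raid_request() below): build the request, which also
 * DMA-maps the data buffer, submit it synchronously, then unmap:
 *
 *	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
 *		buffer, buffer_length, vpd_page, &dir);
 *	if (rc == 0) {
 *		rc = pqi_submit_raid_request_synchronous(ctrl_info,
 *			&request.header, 0, error_info);
 *		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 *	}
 */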

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}
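
/*
 * The allocator above is lock-free: a slot is claimed by being the first
 * to bump its refcount from 0 to 1; losers undo their increment and probe
 * the next slot. next_io_request_slot is only a search hint, so the
 * unlocked reads and writes of it are harmless ("benignly racy") - at
 * worst a thread starts probing from a stale position.
 */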

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
	sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}
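
/*
 * Per pqi_aio_limit_to_bytes() above, each of these firmware-reported
 * limits arrives as a 16-bit count of 1 KiB units, with 0 meaning
 * "no limit" (expanded to ~0 bytes).
 */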

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
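
/*
 * On the wire the wellness buffer is a simple tagged record: a "<HW>"
 * start tag, then a two-byte field tag ("DV" here, "TD" for the time
 * record below) followed by a little-endian payload length and the
 * payload itself, terminated by the "DW" and "ZZ" tags.
 */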

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
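
/*
 * The 8-byte time payload is BCD-encoded local time: hour, minute,
 * second, a zero pad byte, month, day, century, year-in-century. For
 * example, 2021-09-17 14:05:09 encodes as 14 05 09 00 09 17 20 21
 * (each byte BCD).
 */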

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
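
/*
 * Sizing here is a two-pass protocol: a header-only read first fetches
 * the list length, then the full list is read into a right-sized buffer.
 * If a hotplug grows the list between the two reads, the reported length
 * exceeds what was allocated, and the "goto again" path re-sizes and
 * retries.
 */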

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals;
	size_t rpl_16byte_wwid_list_length;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
	rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));

	rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
	if (!rpl_16byte_wwid_list)
		return -ENOMEM;

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}
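
/*
 * Callers always see the extended-format-4 (16-byte WWID) layout: when
 * the controller can only return format 2 (8-byte WWIDs), the loop above
 * copies each entry into a format-4 list and zero-fills the upper 8 WWID
 * bytes, so the rest of the driver needs only one parsing path.
 */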

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	int bus;
	int target;
	int lun;
	u32 lunid;
	u8 *scsi3addr;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}
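
/*
 * layout_map_count encodes the mirror/parity grouping: 2 for a RAID-1
 * pair, 3 for RAID-1 Triple, and greater than 1 for the spanned
 * RAID-50/60 variants of RAID-5/6, which is why the validation above
 * keys off it.
 */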

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {
		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_DEVICE_NCQ_PRIO_SUPPORTED	0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
		id_phys->phy_count)
		device->phy_id =
			id_phys->phy_to_phy_map[device->active_path_index];
	else
		device->phy_id = 0xFF;

	device->ncq_prio_support =
		((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
		PQI_DEVICE_NCQ_PRIO_SUPPORTED);

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 *	 using the management utility.
 */
static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 scsi_status;
	enum dma_data_direction dir;
	char *buffer;
	int buffer_length = 64;
	size_t sense_data_length;
	struct scsi_sense_hdr sshdr;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info error_info;
	bool offline = false; /* Assume keep online */

	/* Do not check controllers. */
	if (pqi_is_hba_lunid(device->scsi3addr))
		return false;

	/* Do not check LVs. */
	if (pqi_is_logical_device(device))
		return false;

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return false; /* Assume not offline */

	/* Check for SANITIZE in progress using TUR */
	rc = pqi_build_raid_path_request(ctrl_info, &request,
		TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		goto out; /* Assume not offline */

	memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);

	if (rc)
		goto out; /* Assume not offline */

	scsi_status = error_info.status;
	sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info.response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info.data))
			sense_data_length = sizeof(error_info.data);

		/*
		 * Check for sanitize in progress: asc:0x04, ascq: 0x1b
		 */
		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info.data,
				sense_data_length, &sshdr) &&
			sshdr.sense_key == NOT_READY &&
			sshdr.asc == 0x04 &&
			sshdr.ascq == 0x1b) {
			device->device_offline = true;
			offline = true;
			goto out; /* Keep device offline */
		}
	}

out:
	kfree(buffer);
	return offline;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);

	pqi_device_remove_start(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;

	return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP    ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx%016llx",
			get_unaligned_be64(&device->wwid[0]),
			get_unaligned_be64(&device->wwid[8]));

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
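
/*
 * Produces one log line per device event, e.g. (hypothetical values):
 *   smartpqi 0000:5e:00.0: added 2:0:1:0 00004000866c0aa5
 *       Direct-Access ATA MM1000GBKAL SSDSmartPathCap- En- RAID-0
 */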

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	if (pqi_is_logical_device(existing_device) &&
		ctrl_info->logical_volume_rescan_needed)
		existing_device->rescan = true;

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->phy_id = new_device->phy_id;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));

	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}
2106 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2107 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2111 unsigned long flags;
2112 enum pqi_find_result find_result;
2113 struct pqi_scsi_dev *device;
2114 struct pqi_scsi_dev *next;
2115 struct pqi_scsi_dev *matching_device;
2116 LIST_HEAD(add_list);
2117 LIST_HEAD(delete_list);
2120 * The idea here is to do as little work as possible while holding the
2121 * spinlock. That's why we go to great pains to defer anything other
2122 * than updating the internal device list until after we release the
2126 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2128 /* Assume that all devices in the existing list have gone away. */
2129 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2130 device->device_gone = true;
2132 for (i = 0; i < num_new_devices; i++) {
2133 device = new_device_list[i];
2135 find_result = pqi_scsi_find_entry(ctrl_info, device,
2138 switch (find_result) {
2141 * The newly found device is already in the existing
2144 device->new_device = false;
2145 matching_device->device_gone = false;
2146 pqi_scsi_update_device(ctrl_info, matching_device, device);
2148 case DEVICE_NOT_FOUND:
2150 * The newly found device is NOT in the existing device
2153 device->new_device = true;
2155 case DEVICE_CHANGED:
2157 * The original device has gone away and we need to add
2158 * the new device.
2160 device->new_device = true;
2165 /* Process all devices that have gone away. */
2166 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2167 scsi_device_list_entry) {
2168 if (device->device_gone) {
2169 list_del(&device->scsi_device_list_entry);
2170 list_add_tail(&device->delete_list_entry, &delete_list);
2174 /* Process all new devices. */
2175 for (i = 0; i < num_new_devices; i++) {
2176 device = new_device_list[i];
2177 if (!device->new_device)
2178 continue;
2179 if (device->volume_offline)
2180 continue;
2181 list_add_tail(&device->scsi_device_list_entry,
2182 &ctrl_info->scsi_device_list);
2183 list_add_tail(&device->add_list_entry, &add_list);
2184 /* To prevent this device structure from being freed later. */
2185 device->keep_device = true;
2188 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2191 * If OFA is in progress and there are devices that need to be deleted,
2192 * allow any pending reset operations to continue and unblock any SCSI
2193 * requests before removal.
2195 if (pqi_ofa_in_progress(ctrl_info)) {
2196 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2197 if (pqi_is_device_added(device))
2198 pqi_device_remove_start(device);
2199 pqi_ctrl_unblock_device_reset(ctrl_info);
2200 pqi_scsi_unblock_requests(ctrl_info);
2203 /* Remove all devices that have gone away. */
2204 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2205 if (device->volume_offline) {
2206 pqi_dev_info(ctrl_info, "offline", device);
2207 pqi_show_volume_status(ctrl_info, device);
2208 } else {
2209 pqi_dev_info(ctrl_info, "removed", device);
2211 if (pqi_is_device_added(device))
2212 pqi_remove_device(ctrl_info, device);
2213 list_del(&device->delete_list_entry);
2214 pqi_free_device(device);
2218 * Notify the SML of any existing device changes such as:
2219 * queue depth, device size.
2221 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2222 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2223 device->advertised_queue_depth = device->queue_depth;
2224 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2225 if (device->rescan) {
2226 scsi_rescan_device(&device->sdev->sdev_gendev);
2227 device->rescan = false;
2232 /* Expose any new devices. */
2233 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2234 if (!pqi_is_device_added(device)) {
2235 rc = pqi_add_device(ctrl_info, device);
2236 if (rc == 0) {
2237 pqi_dev_info(ctrl_info, "added", device);
2238 } else {
2239 dev_warn(&ctrl_info->pci_dev->dev,
2240 "scsi %d:%d:%d:%d addition failed, device not added\n",
2241 ctrl_info->scsi_host->host_no,
2242 device->bus, device->target,
2243 device->lun);
2244 pqi_fixup_botched_add(ctrl_info, device);
2249 ctrl_info->logical_volume_rescan_needed = false;
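/*
 * Illustrative sketch (editorial note, not driver code): the update above
 * is a two-phase pattern. The diff of old versus new devices happens under
 * the spinlock; the slow, potentially sleeping SCSI ML add/remove calls run
 * with the lock dropped:
 *
 *	spin_lock_irqsave(&list_lock, flags);
 *	mark all existing devices gone; diff against the new list;
 *	move departures to delete_list, arrivals to add_list;
 *	spin_unlock_irqrestore(&list_lock, flags);
 *	remove(delete_list); expose(add_list);	// may sleep, no lock held
 */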
2253 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2256 * Only support the HBA controller itself as a RAID
2257 * controller. If it's a RAID controller other than
2258 * the HBA itself (an external RAID controller, for
2259 * example), we don't support it.
2261 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2262 !pqi_is_hba_lunid(device->scsi3addr))
2263 return false;
2265 return true;
2268 static inline bool pqi_skip_device(u8 *scsi3addr)
2270 /* Ignore all masked devices. */
2271 if (MASKED_DEVICE(scsi3addr))
2272 return true;
2274 return false;
2277 static inline void pqi_mask_device(u8 *scsi3addr)
2279 scsi3addr[3] |= 0xc0;
2282 static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2284 if (pqi_is_logical_device(device))
2285 return false;
2287 return (device->path_map & (device->path_map - 1)) != 0;
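/*
 * Worked example (illustrative only): path_map has one bit set per active
 * path, so "x & (x - 1)" (clear the lowest set bit) is non-zero exactly
 * when at least two paths exist:
 *
 *	path_map = 0x1 (one path):  0x1 & 0x0 == 0x0  -> not multipath
 *	path_map = 0x5 (two paths): 0x5 & 0x4 == 0x4  -> multipath
 */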
2290 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2292 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2295 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2297 int i;
2298 int rc;
2299 LIST_HEAD(new_device_list_head);
2300 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2301 struct report_log_lun_list *logdev_list = NULL;
2302 struct report_phys_lun_16byte_wwid *phys_lun;
2303 struct report_log_lun *log_lun;
2304 struct bmic_identify_physical_device *id_phys = NULL;
2305 unsigned int num_physicals;
2306 unsigned int num_logicals;
2307 struct pqi_scsi_dev **new_device_list = NULL;
2308 struct pqi_scsi_dev *device;
2309 struct pqi_scsi_dev *next;
2310 unsigned int num_new_devices;
2311 unsigned int num_valid_devices;
2312 bool is_physical_device;
2313 u8 *scsi3addr;
2314 unsigned int physical_index;
2315 unsigned int logical_index;
2316 static char *out_of_memory_msg =
2317 "failed to allocate memory, device discovery stopped";
2319 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2320 if (rc)
2321 goto out;
2323 if (physdev_list)
2324 num_physicals =
2325 get_unaligned_be32(&physdev_list->header.list_length)
2326 / sizeof(physdev_list->lun_entries[0]);
2327 else
2328 num_physicals = 0;
2330 if (logdev_list)
2331 num_logicals =
2332 get_unaligned_be32(&logdev_list->header.list_length)
2333 / sizeof(logdev_list->lun_entries[0]);
2334 else
2335 num_logicals = 0;
2337 if (num_physicals) {
2339 * We need this buffer for calls to pqi_get_physical_disk_info()
2340 * below. We allocate it here instead of inside
2341 * pqi_get_physical_disk_info() because it's a fairly large
2342 * buffer.
2344 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2345 if (!id_phys) {
2346 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2347 out_of_memory_msg);
2348 rc = -ENOMEM;
2349 goto out;
2352 if (pqi_hide_vsep) {
2353 for (i = num_physicals - 1; i >= 0; i--) {
2354 phys_lun = &physdev_list->lun_entries[i];
2355 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2356 pqi_mask_device(phys_lun->lunid);
2363 if (num_logicals &&
2364 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2365 ctrl_info->lv_drive_type_mix_valid = true;
2367 num_new_devices = num_physicals + num_logicals;
2369 new_device_list = kmalloc_array(num_new_devices,
2370 sizeof(*new_device_list),
2371 GFP_KERNEL);
2372 if (!new_device_list) {
2373 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2374 rc = -ENOMEM;
2375 goto out;
2378 for (i = 0; i < num_new_devices; i++) {
2379 device = kzalloc(sizeof(*device), GFP_KERNEL);
2380 if (!device) {
2381 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2382 out_of_memory_msg);
2383 rc = -ENOMEM;
2384 goto out;
2386 list_add_tail(&device->new_device_list_entry,
2387 &new_device_list_head);
2391 num_valid_devices = 0;
2392 physical_index = 0;
2393 logical_index = 0;
2394 device = NULL;
2395 for (i = 0; i < num_new_devices; i++) {
2397 if ((!pqi_expose_ld_first && i < num_physicals) ||
2398 (pqi_expose_ld_first && i >= num_logicals)) {
2399 is_physical_device = true;
2400 phys_lun = &physdev_list->lun_entries[physical_index++];
2402 scsi3addr = phys_lun->lunid;
2404 is_physical_device = false;
2406 log_lun = &logdev_list->lun_entries[logical_index++];
2407 scsi3addr = log_lun->lunid;
2410 if (is_physical_device && pqi_skip_device(scsi3addr))
2411 continue;
2413 if (device)
2414 device = list_next_entry(device, new_device_list_entry);
2415 else
2416 device = list_first_entry(&new_device_list_head,
2417 struct pqi_scsi_dev, new_device_list_entry);
2419 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2420 device->is_physical_device = is_physical_device;
2421 if (is_physical_device) {
2422 device->device_type = phys_lun->device_type;
2423 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2424 device->is_expander_smp_device = true;
2426 device->is_external_raid_device =
2427 pqi_is_external_raid_addr(scsi3addr);
2430 if (!pqi_is_supported_device(device))
2431 continue;
2433 /* Do not present disks that the OS cannot fully probe */
2434 if (pqi_keep_device_offline(ctrl_info, device))
2435 continue;
2437 /* Gather information about the device. */
2438 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2439 if (rc == -ENOMEM) {
2440 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2441 out_of_memory_msg);
2442 goto out;
2444 if (rc) {
2445 if (device->is_physical_device)
2446 dev_warn(&ctrl_info->pci_dev->dev,
2447 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2448 get_unaligned_be64(&phys_lun->wwid[0]),
2449 get_unaligned_be64(&phys_lun->wwid[8]));
2450 else
2451 dev_warn(&ctrl_info->pci_dev->dev,
2452 "obtaining device info failed, skipping logical device %08x%08x\n",
2453 *((u32 *)&device->scsi3addr),
2454 *((u32 *)&device->scsi3addr[4]));
2455 rc = 0;
2456 continue;
2459 pqi_assign_bus_target_lun(device);
2461 if (device->is_physical_device) {
2462 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2463 if ((phys_lun->device_flags &
2464 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2465 phys_lun->aio_handle) {
2466 device->aio_enabled = true;
2467 device->aio_handle =
2468 phys_lun->aio_handle;
2471 memcpy(device->volume_id, log_lun->volume_id,
2472 sizeof(device->volume_id));
2475 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2477 new_device_list[num_valid_devices++] = device;
2480 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2482 out:
2483 list_for_each_entry_safe(device, next, &new_device_list_head,
2484 new_device_list_entry) {
2485 if (device->keep_device)
2486 continue;
2487 list_del(&device->new_device_list_entry);
2488 pqi_free_device(device);
2491 kfree(new_device_list);
2492 kfree(physdev_list);
2493 kfree(logdev_list);
2494 kfree(id_phys);
2496 return rc;
2499 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2501 unsigned long flags;
2502 struct pqi_scsi_dev *device;
2503 struct pqi_scsi_dev *next;
2505 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2506 scsi_device_list_entry) {
2507 if (pqi_is_device_added(device))
2508 pqi_remove_device(ctrl_info, device);
2509 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2510 list_del(&device->scsi_device_list_entry);
2511 pqi_free_device(device);
2512 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2516 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2518 int rc;
2519 int mutex_acquired;
2521 if (pqi_ctrl_offline(ctrl_info))
2522 return -ENXIO;
2524 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2526 if (!mutex_acquired) {
2527 if (pqi_ctrl_scan_blocked(ctrl_info))
2528 return -EBUSY;
2529 pqi_schedule_rescan_worker_delayed(ctrl_info);
2530 return -EINPROGRESS;
2533 rc = pqi_update_scsi_devices(ctrl_info);
2534 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2535 pqi_schedule_rescan_worker_delayed(ctrl_info);
2537 mutex_unlock(&ctrl_info->scan_mutex);
2539 return rc;
2542 static void pqi_scan_start(struct Scsi_Host *shost)
2544 struct pqi_ctrl_info *ctrl_info;
2546 ctrl_info = shost_to_hba(shost);
2548 pqi_scan_scsi_devices(ctrl_info);
2551 /* Returns TRUE if scan is finished. */
2553 static int pqi_scan_finished(struct Scsi_Host *shost,
2554 unsigned long elapsed_time)
2556 struct pqi_ctrl_info *ctrl_info;
2558 ctrl_info = shost_priv(shost);
2560 return !mutex_is_locked(&ctrl_info->scan_mutex);
2563 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2564 struct raid_map *raid_map, u64 first_block)
2566 u32 volume_blk_size;
2569 * Set the encryption tweak values based on logical block address.
2570 * If the block size is 512, the tweak value is equal to the LBA.
2571 * For other block sizes, tweak value is (LBA * block size) / 512.
2573 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2574 if (volume_blk_size != 512)
2575 first_block = (first_block * volume_blk_size) / 512;
2577 encryption_info->data_encryption_key_index =
2578 get_unaligned_le16(&raid_map->data_encryption_key_index);
2579 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2580 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
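/*
 * Worked example (illustrative only): for a volume with 4096-byte blocks
 * and first_block = 100, the tweak becomes (100 * 4096) / 512 = 800, stored
 * as encrypt_tweak_lower = 800 and encrypt_tweak_upper = 0. With 512-byte
 * blocks the tweak is simply the LBA itself.
 */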
2584 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2587 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2588 struct pqi_scsi_dev_raid_map_data *rmd)
2590 bool is_supported = true;
2592 switch (rmd->raid_level) {
2593 case SA_RAID_0:
2594 break;
2595 case SA_RAID_1:
2596 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2597 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2598 is_supported = false;
2600 case SA_RAID_TRIPLE:
2601 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2602 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2603 is_supported = false;
2605 case SA_RAID_5:
2606 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2607 rmd->data_length > ctrl_info->max_write_raid_5_6))
2608 is_supported = false;
2610 case SA_RAID_6:
2611 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2612 rmd->data_length > ctrl_info->max_write_raid_5_6))
2613 is_supported = false;
2615 default:
2616 is_supported = false;
2620 return is_supported;
2623 #define PQI_RAID_BYPASS_INELIGIBLE 1
2625 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2626 struct pqi_scsi_dev_raid_map_data *rmd)
2628 /* Check for valid opcode, get LBA and block count. */
2629 switch (scmd->cmnd[0]) {
2630 case WRITE_6:
2631 rmd->is_write = true;
2632 fallthrough;
2633 case READ_6:
2634 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2635 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2636 rmd->block_cnt = (u32)scmd->cmnd[4];
2637 if (rmd->block_cnt == 0)
2638 rmd->block_cnt = 256;
2640 case WRITE_10:
2641 rmd->is_write = true;
2642 fallthrough;
2643 case READ_10:
2644 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2645 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2647 case WRITE_12:
2648 rmd->is_write = true;
2649 fallthrough;
2650 case READ_12:
2651 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2652 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2654 case WRITE_16:
2655 rmd->is_write = true;
2656 fallthrough;
2657 case READ_16:
2658 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2659 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2661 default:
2662 /* Process via normal I/O path. */
2663 return PQI_RAID_BYPASS_INELIGIBLE;
2666 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2668 return 0;
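/*
 * Example (illustrative only): a READ(10) CDB carries a 32-bit big-endian
 * LBA in bytes 2-5 and a 16-bit transfer length in bytes 7-8:
 *
 *	28 00 00 00 10 00 00 00 08 00  ->  LBA 0x1000, 8 blocks
 *
 * READ(6)/WRITE(6) have only an 8-bit length field, where 0 historically
 * means 256 blocks; hence the special case above.
 */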
2671 static int pqi_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2672 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2674 #if BITS_PER_LONG == 32
2675 u64 tmpdiv;
2676 #endif
2678 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2680 /* Check for invalid block or wraparound. */
2681 if (rmd->last_block >=
2682 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2683 rmd->last_block < rmd->first_block)
2684 return PQI_RAID_BYPASS_INELIGIBLE;
2686 rmd->data_disks_per_row =
2687 get_unaligned_le16(&raid_map->data_disks_per_row);
2688 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2689 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2691 /* Calculate stripe information for the request. */
2692 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2693 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2694 return PQI_RAID_BYPASS_INELIGIBLE;
2695 #if BITS_PER_LONG == 32
2696 tmpdiv = rmd->first_block;
2697 do_div(tmpdiv, rmd->blocks_per_row);
2698 rmd->first_row = tmpdiv;
2699 tmpdiv = rmd->last_block;
2700 do_div(tmpdiv, rmd->blocks_per_row);
2701 rmd->last_row = tmpdiv;
2702 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2703 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2704 tmpdiv = rmd->first_row_offset;
2705 do_div(tmpdiv, rmd->strip_size);
2706 rmd->first_column = tmpdiv;
2707 tmpdiv = rmd->last_row_offset;
2708 do_div(tmpdiv, rmd->strip_size);
2709 rmd->last_column = tmpdiv;
2710 #else
2711 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2712 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2713 rmd->first_row_offset = (u32)(rmd->first_block -
2714 (rmd->first_row * rmd->blocks_per_row));
2715 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2716 rmd->blocks_per_row));
2717 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2718 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2719 #endif
2721 /* If this isn't a single row/column, hand the request to the controller. */
2722 if (rmd->first_row != rmd->last_row ||
2723 rmd->first_column != rmd->last_column)
2724 return PQI_RAID_BYPASS_INELIGIBLE;
2726 /* Proceeding with driver mapping. */
2727 rmd->total_disks_per_row = rmd->data_disks_per_row +
2728 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2729 rmd->map_row = ((u32)(rmd->first_row >>
2730 raid_map->parity_rotation_shift)) %
2731 get_unaligned_le16(&raid_map->row_cnt);
2732 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2733 rmd->first_column;
2735 return 0;
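/*
 * Worked example (illustrative only, assuming 3 data disks and a strip_size
 * of 128 blocks): blocks_per_row = 3 * 128 = 384. For first_block = 1000:
 * first_row = 1000 / 384 = 2, first_row_offset = 1000 - 2 * 384 = 232 and
 * first_column = 232 / 128 = 1. The request stays bypass-eligible only if
 * last_block lands in that same row and column.
 */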
2738 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2739 struct raid_map *raid_map)
2741 #if BITS_PER_LONG == 32
2742 u64 tmpdiv;
2743 #endif
2745 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2746 return PQI_RAID_BYPASS_INELIGIBLE;
2749 /* Verify that the first and last blocks are in the same RAID group. */
2750 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2751 #if BITS_PER_LONG == 32
2752 tmpdiv = rmd->first_block;
2753 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2754 tmpdiv = rmd->first_group;
2755 do_div(tmpdiv, rmd->blocks_per_row);
2756 rmd->first_group = tmpdiv;
2757 tmpdiv = rmd->last_block;
2758 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2759 tmpdiv = rmd->last_group;
2760 do_div(tmpdiv, rmd->blocks_per_row);
2761 rmd->last_group = tmpdiv;
2762 #else
2763 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2764 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2765 #endif
2766 if (rmd->first_group != rmd->last_group)
2767 return PQI_RAID_BYPASS_INELIGIBLE;
2769 /* Verify request is in a single row of RAID 5/6. */
2770 #if BITS_PER_LONG == 32
2771 tmpdiv = rmd->first_block;
2772 do_div(tmpdiv, rmd->stripesize);
2773 rmd->first_row = tmpdiv;
2774 rmd->r5or6_first_row = tmpdiv;
2775 tmpdiv = rmd->last_block;
2776 do_div(tmpdiv, rmd->stripesize);
2777 rmd->r5or6_last_row = tmpdiv;
2778 #else
2779 rmd->first_row = rmd->r5or6_first_row =
2780 rmd->first_block / rmd->stripesize;
2781 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2782 #endif
2783 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2784 return PQI_RAID_BYPASS_INELIGIBLE;
2786 /* Verify request is in a single column. */
2787 #if BITS_PER_LONG == 32
2788 tmpdiv = rmd->first_block;
2789 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2790 tmpdiv = rmd->first_row_offset;
2791 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2792 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2793 tmpdiv = rmd->last_block;
2794 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2795 tmpdiv = rmd->r5or6_last_row_offset;
2796 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2797 tmpdiv = rmd->r5or6_first_row_offset;
2798 do_div(tmpdiv, rmd->strip_size);
2799 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2800 tmpdiv = rmd->r5or6_last_row_offset;
2801 do_div(tmpdiv, rmd->strip_size);
2802 rmd->r5or6_last_column = tmpdiv;
2803 #else
2804 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2805 (u32)((rmd->first_block % rmd->stripesize) %
2806 rmd->blocks_per_row);
2808 rmd->r5or6_last_row_offset =
2809 (u32)((rmd->last_block % rmd->stripesize) %
2810 rmd->blocks_per_row);
2812 rmd->first_column =
2813 rmd->r5or6_first_row_offset / rmd->strip_size;
2814 rmd->r5or6_first_column = rmd->first_column;
2815 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2816 #endif
2817 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2818 return PQI_RAID_BYPASS_INELIGIBLE;
2820 /* Request is eligible. */
2821 rmd->map_row =
2822 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2823 get_unaligned_le16(&raid_map->row_cnt);
2825 rmd->map_index = (rmd->first_group *
2826 (get_unaligned_le16(&raid_map->row_cnt) *
2827 rmd->total_disks_per_row)) +
2828 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2830 if (rmd->is_write) {
2834 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2835 * parity entries inside the device's raid_map.
2837 * A device's RAID map is bounded by: number of RAID disks squared.
2839 * The device's RAID map size is checked during device
2840 * initialization.
2842 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2843 index *= rmd->total_disks_per_row;
2844 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2846 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2847 if (rmd->raid_level == SA_RAID_6) {
2848 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2849 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2851 #if BITS_PER_LONG == 32
2852 tmpdiv = rmd->first_block;
2853 do_div(tmpdiv, rmd->blocks_per_row);
2854 rmd->row = tmpdiv;
2855 #else
2856 rmd->row = rmd->first_block / rmd->blocks_per_row;
2857 #endif
2860 return 0;
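/*
 * Note (illustrative only): the BITS_PER_LONG == 32 branches exist because
 * a plain 64-bit divide is unavailable there; do_div(x, y) replaces x with
 * the quotient and returns the remainder:
 *
 *	u64 tmpdiv = 1000;
 *	u32 rem = do_div(tmpdiv, 384);	// tmpdiv == 2, rem == 232
 */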
2863 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2865 /* Build the new CDB for the physical disk I/O. */
2866 if (rmd->disk_block > 0xffffffff) {
2867 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2869 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2870 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2873 rmd->cdb_length = 16;
2875 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2877 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2879 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2881 rmd->cdb_length = 10;
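/*
 * Example (illustrative only): disk_block 0x100000000 no longer fits in the
 * 32-bit LBA field of a 10-byte CDB, so READ_16/WRITE_16 is built; any
 * value at or below 0xffffffff fits in READ_10/WRITE_10.
 */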
2885 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2886 struct pqi_scsi_dev_raid_map_data *rmd)
2888 u32 group;
2889 u32 index;
2891 group = rmd->map_index / rmd->data_disks_per_row;
2893 index = rmd->map_index - (group * rmd->data_disks_per_row);
2894 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2895 index += rmd->data_disks_per_row;
2896 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2897 if (rmd->layout_map_count > 2) {
2898 index += rmd->data_disks_per_row;
2899 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2902 rmd->num_it_nexus_entries = rmd->layout_map_count;
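/*
 * Worked example (illustrative only, assuming data_disks_per_row = 2 and
 * layout_map_count = 2, e.g. a four-drive RAID 10): for map_index = 1,
 * group = 0 and index = 1, so it_nexus[0] comes from disk_data[1] and
 * it_nexus[1] from disk_data[3], the same column one mirror row down.
 */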
2905 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2906 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2907 struct pqi_queue_group *queue_group)
2909 int rc;
2910 struct raid_map *raid_map;
2911 u32 group;
2912 u32 next_bypass_group;
2913 struct pqi_encryption_info *encryption_info_ptr;
2914 struct pqi_encryption_info encryption_info;
2915 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2917 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2918 if (rc)
2919 return PQI_RAID_BYPASS_INELIGIBLE;
2921 rmd.raid_level = device->raid_level;
2923 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2924 return PQI_RAID_BYPASS_INELIGIBLE;
2926 if (unlikely(rmd.block_cnt == 0))
2927 return PQI_RAID_BYPASS_INELIGIBLE;
2929 raid_map = device->raid_map;
2931 rc = pqi_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2932 if (rc)
2933 return PQI_RAID_BYPASS_INELIGIBLE;
2935 if (device->raid_level == SA_RAID_1 ||
2936 device->raid_level == SA_RAID_TRIPLE) {
2937 if (rmd.is_write) {
2938 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2939 } else {
2940 group = device->next_bypass_group[rmd.map_index];
2941 next_bypass_group = group + 1;
2942 if (next_bypass_group >= rmd.layout_map_count)
2943 next_bypass_group = 0;
2944 device->next_bypass_group[rmd.map_index] = next_bypass_group;
2945 rmd.map_index += group * rmd.data_disks_per_row;
2947 } else if ((device->raid_level == SA_RAID_5 ||
2948 device->raid_level == SA_RAID_6) &&
2949 (rmd.layout_map_count > 1 || rmd.is_write)) {
2950 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2951 if (rc)
2952 return PQI_RAID_BYPASS_INELIGIBLE;
2955 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2956 return PQI_RAID_BYPASS_INELIGIBLE;
2958 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2959 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2960 rmd.first_row * rmd.strip_size +
2961 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2962 rmd.disk_block_cnt = rmd.block_cnt;
2964 /* Handle differing logical/physical block sizes. */
2965 if (raid_map->phys_blk_shift) {
2966 rmd.disk_block <<= raid_map->phys_blk_shift;
2967 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2970 if (unlikely(rmd.disk_block_cnt > 0xffff))
2971 return PQI_RAID_BYPASS_INELIGIBLE;
2973 pqi_set_aio_cdb(&rmd);
2975 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2976 if (rmd.data_length > device->max_transfer_encrypted)
2977 return PQI_RAID_BYPASS_INELIGIBLE;
2978 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2979 encryption_info_ptr = &encryption_info;
2980 } else {
2981 encryption_info_ptr = NULL;
2984 if (rmd.is_write) {
2985 switch (device->raid_level) {
2986 case SA_RAID_1:
2987 case SA_RAID_TRIPLE:
2988 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
2989 encryption_info_ptr, device, &rmd);
2990 case SA_RAID_5:
2991 case SA_RAID_6:
2992 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
2993 encryption_info_ptr, device, &rmd);
2997 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
2998 rmd.cdb, rmd.cdb_length, queue_group,
2999 encryption_info_ptr, true, false);
3002 #define PQI_STATUS_IDLE 0x0
3004 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3005 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3007 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3008 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3009 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3010 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3011 #define PQI_DEVICE_STATE_ERROR 0x4
3013 #define PQI_MODE_READY_TIMEOUT_SECS 30
3014 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
3016 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3018 struct pqi_device_registers __iomem *pqi_registers;
3019 unsigned long timeout;
3020 u64 signature;
3021 u8 status;
3023 pqi_registers = ctrl_info->pqi_registers;
3024 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3026 while (1) {
3027 signature = readq(&pqi_registers->signature);
3028 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3029 sizeof(signature)) == 0)
3030 break;
3031 if (time_after(jiffies, timeout)) {
3032 dev_err(&ctrl_info->pci_dev->dev,
3033 "timed out waiting for PQI signature\n");
3034 return -ETIMEDOUT;
3036 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3039 while (1) {
3040 status = readb(&pqi_registers->function_and_status_code);
3041 if (status == PQI_STATUS_IDLE)
3042 break;
3043 if (time_after(jiffies, timeout)) {
3044 dev_err(&ctrl_info->pci_dev->dev,
3045 "timed out waiting for PQI IDLE\n");
3046 return -ETIMEDOUT;
3048 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3051 while (1) {
3052 if (readl(&pqi_registers->device_status) ==
3053 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3054 break;
3055 if (time_after(jiffies, timeout)) {
3056 dev_err(&ctrl_info->pci_dev->dev,
3057 "timed out waiting for PQI all registers ready\n");
3058 return -ETIMEDOUT;
3060 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3063 return 0;
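/*
 * Sketch (illustrative only) of the jiffies-based polling pattern used by
 * all three loops above:
 *
 *	timeout = (SECS * HZ) + jiffies;
 *	while (1) {
 *		if (condition_met())		// hypothetical predicate
 *			break;
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *		msleep(POLL_INTERVAL_MSECS);
 *	}
 */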
3066 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3068 struct pqi_scsi_dev *device;
3070 device = io_request->scmd->device->hostdata;
3071 device->raid_bypass_enabled = false;
3072 device->aio_enabled = false;
3075 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3077 struct pqi_ctrl_info *ctrl_info;
3078 struct pqi_scsi_dev *device;
3080 device = sdev->hostdata;
3081 if (device->device_offline)
3082 return;
3084 device->device_offline = true;
3085 ctrl_info = shost_to_hba(sdev->host);
3086 pqi_schedule_rescan_worker(ctrl_info);
3087 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3088 path, ctrl_info->scsi_host->host_no, device->bus,
3089 device->target, device->lun);
3092 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3094 u8 scsi_status;
3095 u8 host_byte;
3096 struct scsi_cmnd *scmd;
3097 struct pqi_raid_error_info *error_info;
3098 size_t sense_data_length;
3099 int residual_count;
3100 int xfer_count;
3101 struct scsi_sense_hdr sshdr;
3103 scmd = io_request->scmd;
3107 error_info = io_request->error_info;
3108 scsi_status = error_info->status;
3109 host_byte = DID_OK;
3111 switch (error_info->data_out_result) {
3112 case PQI_DATA_IN_OUT_GOOD:
3114 case PQI_DATA_IN_OUT_UNDERFLOW:
3115 xfer_count =
3116 get_unaligned_le32(&error_info->data_out_transferred);
3117 residual_count = scsi_bufflen(scmd) - xfer_count;
3118 scsi_set_resid(scmd, residual_count);
3119 if (xfer_count < scmd->underflow)
3120 host_byte = DID_SOFT_ERROR;
3122 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3123 case PQI_DATA_IN_OUT_ABORTED:
3124 host_byte = DID_ABORT;
3126 case PQI_DATA_IN_OUT_TIMEOUT:
3127 host_byte = DID_TIME_OUT;
3129 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3130 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3131 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3132 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3133 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3134 case PQI_DATA_IN_OUT_ERROR:
3135 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3136 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3137 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3138 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3139 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3140 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3141 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3142 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3143 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3144 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3145 default:
3146 host_byte = DID_ERROR;
3150 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3151 if (sense_data_length == 0)
3152 sense_data_length =
3153 get_unaligned_le16(&error_info->response_data_length);
3154 if (sense_data_length) {
3155 if (sense_data_length > sizeof(error_info->data))
3156 sense_data_length = sizeof(error_info->data);
3158 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3159 scsi_normalize_sense(error_info->data,
3160 sense_data_length, &sshdr) &&
3161 sshdr.sense_key == HARDWARE_ERROR &&
3162 sshdr.asc == 0x3e) {
3163 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3164 struct pqi_scsi_dev *device = scmd->device->hostdata;
3166 switch (sshdr.ascq) {
3167 case 0x1: /* LOGICAL UNIT FAILURE */
3168 if (printk_ratelimit())
3169 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3170 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3171 pqi_take_device_offline(scmd->device, "RAID");
3172 host_byte = DID_NO_CONNECT;
3175 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3176 if (printk_ratelimit())
3177 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3178 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3183 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3184 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3185 memcpy(scmd->sense_buffer, error_info->data,
3186 sense_data_length);
3189 scmd->result = scsi_status;
3190 set_host_byte(scmd, host_byte);
3193 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3195 u8 scsi_status;
3196 u8 host_byte;
3197 struct scsi_cmnd *scmd;
3198 struct pqi_aio_error_info *error_info;
3199 size_t sense_data_length;
3200 int residual_count;
3201 int xfer_count;
3202 bool device_offline;
3203 struct pqi_scsi_dev *device;
3205 scmd = io_request->scmd;
3206 error_info = io_request->error_info;
3207 host_byte = DID_OK;
3208 sense_data_length = 0;
3209 device_offline = false;
3210 device = scmd->device->hostdata;
3212 switch (error_info->service_response) {
3213 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3214 scsi_status = error_info->status;
3216 case PQI_AIO_SERV_RESPONSE_FAILURE:
3217 switch (error_info->status) {
3218 case PQI_AIO_STATUS_IO_ABORTED:
3219 scsi_status = SAM_STAT_TASK_ABORTED;
3221 case PQI_AIO_STATUS_UNDERRUN:
3222 scsi_status = SAM_STAT_GOOD;
3223 residual_count = get_unaligned_le32(
3224 &error_info->residual_count);
3225 scsi_set_resid(scmd, residual_count);
3226 xfer_count = scsi_bufflen(scmd) - residual_count;
3227 if (xfer_count < scmd->underflow)
3228 host_byte = DID_SOFT_ERROR;
3230 case PQI_AIO_STATUS_OVERRUN:
3231 scsi_status = SAM_STAT_GOOD;
3233 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3234 pqi_aio_path_disabled(io_request);
3235 if (pqi_is_multipath_device(device)) {
3236 pqi_device_remove_start(device);
3237 host_byte = DID_NO_CONNECT;
3238 scsi_status = SAM_STAT_CHECK_CONDITION;
3239 } else {
3240 scsi_status = SAM_STAT_GOOD;
3241 io_request->status = -EAGAIN;
3244 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3245 case PQI_AIO_STATUS_INVALID_DEVICE:
3246 if (!io_request->raid_bypass) {
3247 device_offline = true;
3248 pqi_take_device_offline(scmd->device, "AIO");
3249 host_byte = DID_NO_CONNECT;
3251 scsi_status = SAM_STAT_CHECK_CONDITION;
3253 case PQI_AIO_STATUS_IO_ERROR:
3255 scsi_status = SAM_STAT_CHECK_CONDITION;
3259 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3260 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3261 scsi_status = SAM_STAT_GOOD;
3263 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3264 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3266 scsi_status = SAM_STAT_CHECK_CONDITION;
3270 if (error_info->data_present) {
3271 sense_data_length =
3272 get_unaligned_le16(&error_info->data_length);
3273 if (sense_data_length) {
3274 if (sense_data_length > sizeof(error_info->data))
3275 sense_data_length = sizeof(error_info->data);
3276 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3277 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3278 memcpy(scmd->sense_buffer, error_info->data,
3279 sense_data_length);
3283 if (device_offline && sense_data_length == 0)
3284 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3286 scmd->result = scsi_status;
3287 set_host_byte(scmd, host_byte);
3290 static void pqi_process_io_error(unsigned int iu_type,
3291 struct pqi_io_request *io_request)
3294 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3295 pqi_process_raid_io_error(io_request);
3297 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3298 pqi_process_aio_io_error(io_request);
3303 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3304 struct pqi_task_management_response *response)
3306 int rc;
3308 switch (response->response_code) {
3309 case SOP_TMF_COMPLETE:
3310 case SOP_TMF_FUNCTION_SUCCEEDED:
3311 rc = 0;
3312 break;
3313 case SOP_TMF_REJECTED:
3314 rc = -EAGAIN;
3315 break;
3316 default:
3317 rc = -EIO;
3318 break;
3321 if (rc)
3322 dev_err(&ctrl_info->pci_dev->dev,
3323 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3325 return rc;
3328 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3329 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3331 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3334 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3336 int num_responses;
3337 pqi_index_t oq_pi;
3338 pqi_index_t oq_ci;
3339 struct pqi_io_request *io_request;
3340 struct pqi_io_response *response;
3341 u16 request_id;
3342 num_responses = 0;
3344 oq_ci = queue_group->oq_ci_copy;
3346 while (1) {
3347 oq_pi = readl(queue_group->oq_pi);
3348 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3349 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3350 dev_err(&ctrl_info->pci_dev->dev,
3351 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3352 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3353 return -1;
3356 if (oq_pi == oq_ci)
3357 break;
3358 num_responses++;
3359 response = queue_group->oq_element_array +
3360 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3362 request_id = get_unaligned_le16(&response->request_id);
3363 if (request_id >= ctrl_info->max_io_slots) {
3364 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3365 dev_err(&ctrl_info->pci_dev->dev,
3366 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3367 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3368 return -1;
3371 io_request = &ctrl_info->io_request_pool[request_id];
3372 if (atomic_read(&io_request->refcount) == 0) {
3373 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3374 dev_err(&ctrl_info->pci_dev->dev,
3375 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3376 request_id, oq_pi, oq_ci);
3377 return -1;
3380 switch (response->header.iu_type) {
3381 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3382 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3383 if (io_request->scmd)
3384 io_request->scmd->result = 0;
3386 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3388 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3389 io_request->status =
3390 get_unaligned_le16(
3391 &((struct pqi_vendor_general_response *)response)->status);
3393 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3394 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3395 (void *)response);
3397 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3398 pqi_aio_path_disabled(io_request);
3399 io_request->status = -EAGAIN;
3401 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3402 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3403 io_request->error_info = ctrl_info->error_buffer +
3404 (get_unaligned_le16(&response->error_index) *
3405 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3406 pqi_process_io_error(response->header.iu_type, io_request);
3409 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3410 dev_err(&ctrl_info->pci_dev->dev,
3411 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3412 response->header.iu_type, oq_pi, oq_ci);
3413 return -1;
3416 io_request->io_complete_callback(io_request, io_request->context);
3419 * Note that the I/O request structure CANNOT BE TOUCHED after
3420 * returning from the I/O completion callback!
3422 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3425 if (num_responses) {
3426 queue_group->oq_ci_copy = oq_ci;
3427 writel(oq_ci, queue_group->oq_ci);
3430 return num_responses;
3433 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3434 unsigned int ci, unsigned int elements_in_queue)
3436 unsigned int num_elements_used;
3438 if (pi >= ci)
3439 num_elements_used = pi - ci;
3440 else
3441 num_elements_used = elements_in_queue - ci + pi;
3443 return elements_in_queue - num_elements_used - 1;
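/*
 * Worked example (illustrative only): with 8 elements, pi = 2 and ci = 6
 * (the producer has wrapped): used = 8 - 6 + 2 = 4, free = 8 - 4 - 1 = 3.
 * One slot is deliberately kept empty so that pi == ci always means
 * "queue empty" rather than "queue full".
 */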
3446 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3447 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3449 pqi_index_t iq_pi;
3450 pqi_index_t iq_ci;
3451 unsigned long flags;
3452 void *next_element;
3453 struct pqi_queue_group *queue_group;
3455 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3456 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3458 while (1) {
3459 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3461 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3462 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3464 if (pqi_num_elements_free(iq_pi, iq_ci,
3465 ctrl_info->num_elements_per_iq))
3466 break;
3468 spin_unlock_irqrestore(
3469 &queue_group->submit_lock[RAID_PATH], flags);
3471 if (pqi_ctrl_offline(ctrl_info))
3472 return;
3475 next_element = queue_group->iq_element_array[RAID_PATH] +
3476 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3478 memcpy(next_element, iu, iu_length);
3480 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3481 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3484 * This write notifies the controller that an IU is available to be
3485 * processed.
3487 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3489 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3492 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3493 struct pqi_event *event)
3495 struct pqi_event_acknowledge_request request;
3497 memset(&request, 0, sizeof(request));
3499 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3500 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3501 &request.header.iu_length);
3502 request.event_type = event->event_type;
3503 put_unaligned_le16(event->event_id, &request.event_id);
3504 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3506 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3509 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3510 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3512 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3513 struct pqi_ctrl_info *ctrl_info)
3515 u8 status;
3516 unsigned long timeout;
3518 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3520 while (1) {
3521 status = pqi_read_soft_reset_status(ctrl_info);
3522 if (status & PQI_SOFT_RESET_INITIATE)
3523 return RESET_INITIATE_DRIVER;
3525 if (status & PQI_SOFT_RESET_ABORT)
3526 return RESET_ABORT;
3528 if (!sis_is_firmware_running(ctrl_info))
3529 return RESET_NORESPONSE;
3531 if (time_after(jiffies, timeout)) {
3532 dev_warn(&ctrl_info->pci_dev->dev,
3533 "timed out waiting for soft reset status\n");
3534 return RESET_TIMEDOUT;
3537 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3541 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3543 int rc;
3544 unsigned int delay_secs;
3545 enum pqi_soft_reset_status reset_status;
3547 if (ctrl_info->soft_reset_handshake_supported)
3548 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3549 else
3550 reset_status = RESET_INITIATE_FIRMWARE;
3552 delay_secs = PQI_POST_RESET_DELAY_SECS;
3554 switch (reset_status) {
3555 case RESET_TIMEDOUT:
3556 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3557 fallthrough;
3558 case RESET_INITIATE_DRIVER:
3559 dev_info(&ctrl_info->pci_dev->dev,
3560 "Online Firmware Activation: resetting controller\n");
3561 sis_soft_reset(ctrl_info);
3562 fallthrough;
3563 case RESET_INITIATE_FIRMWARE:
3564 ctrl_info->pqi_mode_enabled = false;
3565 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3566 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3567 pqi_ofa_free_host_buffer(ctrl_info);
3568 pqi_ctrl_ofa_done(ctrl_info);
3569 dev_info(&ctrl_info->pci_dev->dev,
3570 "Online Firmware Activation: %s\n",
3571 rc == 0 ? "SUCCESS" : "FAILED");
3573 case RESET_ABORT:
3574 dev_info(&ctrl_info->pci_dev->dev,
3575 "Online Firmware Activation ABORTED\n");
3576 if (ctrl_info->soft_reset_handshake_supported)
3577 pqi_clear_soft_reset_status(ctrl_info);
3578 pqi_ofa_free_host_buffer(ctrl_info);
3579 pqi_ctrl_ofa_done(ctrl_info);
3580 pqi_ofa_ctrl_unquiesce(ctrl_info);
3582 case RESET_NORESPONSE:
3583 fallthrough;
3584 default:
3585 dev_err(&ctrl_info->pci_dev->dev,
3586 "unexpected Online Firmware Activation reset status: 0x%x\n",
3587 reset_status);
3588 pqi_ofa_free_host_buffer(ctrl_info);
3589 pqi_ctrl_ofa_done(ctrl_info);
3590 pqi_ofa_ctrl_unquiesce(ctrl_info);
3591 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3596 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3598 struct pqi_ctrl_info *ctrl_info;
3600 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3602 pqi_ctrl_ofa_start(ctrl_info);
3603 pqi_ofa_setup_host_buffer(ctrl_info);
3604 pqi_ofa_host_memory_update(ctrl_info);
3607 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3609 struct pqi_ctrl_info *ctrl_info;
3610 struct pqi_event *event;
3612 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3614 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3616 pqi_ofa_ctrl_quiesce(ctrl_info);
3617 pqi_acknowledge_event(ctrl_info, event);
3618 pqi_process_soft_reset(ctrl_info);
3621 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3622 struct pqi_event *event)
3624 bool ack_event;
3626 ack_event = true;
3628 switch (event->event_id) {
3629 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3630 dev_info(&ctrl_info->pci_dev->dev,
3631 "received Online Firmware Activation memory allocation request\n");
3632 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3634 case PQI_EVENT_OFA_QUIESCE:
3635 dev_info(&ctrl_info->pci_dev->dev,
3636 "received Online Firmware Activation quiesce request\n");
3637 schedule_work(&ctrl_info->ofa_quiesce_work);
3638 ack_event = false;
3639 break;
3640 case PQI_EVENT_OFA_CANCELED:
3641 dev_info(&ctrl_info->pci_dev->dev,
3642 "received Online Firmware Activation cancel request: reason: %u\n",
3643 ctrl_info->ofa_cancel_reason);
3644 pqi_ofa_free_host_buffer(ctrl_info);
3645 pqi_ctrl_ofa_done(ctrl_info);
3647 default:
3648 dev_err(&ctrl_info->pci_dev->dev,
3649 "received unknown Online Firmware Activation request: event ID: %u\n",
3650 event->event_id);
3654 return ack_event;
3657 static void pqi_event_worker(struct work_struct *work)
3659 unsigned int i;
3660 bool rescan_needed;
3661 struct pqi_ctrl_info *ctrl_info;
3662 struct pqi_event *event;
3663 bool ack_event;
3665 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3667 pqi_ctrl_busy(ctrl_info);
3668 pqi_wait_if_ctrl_blocked(ctrl_info);
3669 if (pqi_ctrl_offline(ctrl_info))
3670 goto out;
3672 rescan_needed = false;
3673 event = ctrl_info->events;
3674 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3675 if (event->pending) {
3676 event->pending = false;
3677 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3678 ack_event = pqi_ofa_process_event(ctrl_info, event);
3679 } else {
3680 ack_event = true;
3681 rescan_needed = true;
3682 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3683 ctrl_info->logical_volume_rescan_needed = true;
3685 if (ack_event)
3686 pqi_acknowledge_event(ctrl_info, event);
3691 if (rescan_needed)
3692 pqi_schedule_rescan_worker_delayed(ctrl_info);
3694 out:
3695 pqi_ctrl_unbusy(ctrl_info);
3698 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
3700 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3702 int num_interrupts;
3703 u32 heartbeat_count;
3704 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3706 pqi_check_ctrl_health(ctrl_info);
3707 if (pqi_ctrl_offline(ctrl_info))
3708 return;
3710 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3711 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3713 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3714 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3715 dev_err(&ctrl_info->pci_dev->dev,
3716 "no heartbeat detected - last heartbeat count: %u\n",
3717 heartbeat_count);
3718 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3719 return;
3722 ctrl_info->previous_num_interrupts = num_interrupts;
3725 ctrl_info->previous_heartbeat_count = heartbeat_count;
3726 mod_timer(&ctrl_info->heartbeat_timer,
3727 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
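/*
 * Note (illustrative only): the controller is declared dead only when BOTH
 * counters are static across one PQI_HEARTBEAT_TIMER_INTERVAL window, i.e.
 * num_interrupts unchanged AND heartbeat_count unchanged. Interrupt
 * activity alone causes the heartbeat comparison to be skipped for that
 * window.
 */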
3730 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3732 if (!ctrl_info->heartbeat_counter)
3733 return;
3735 ctrl_info->previous_num_interrupts =
3736 atomic_read(&ctrl_info->num_interrupts);
3737 ctrl_info->previous_heartbeat_count =
3738 pqi_read_heartbeat_counter(ctrl_info);
3740 ctrl_info->heartbeat_timer.expires =
3741 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3742 add_timer(&ctrl_info->heartbeat_timer);
3745 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3747 del_timer_sync(&ctrl_info->heartbeat_timer);
3750 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3751 struct pqi_event *event, struct pqi_event_response *response)
3753 switch (event->event_id) {
3754 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3755 ctrl_info->ofa_bytes_requested =
3756 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3758 case PQI_EVENT_OFA_CANCELED:
3759 ctrl_info->ofa_cancel_reason =
3760 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3765 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3767 int num_events;
3768 pqi_index_t oq_pi;
3769 pqi_index_t oq_ci;
3770 struct pqi_event_queue *event_queue;
3771 struct pqi_event_response *response;
3772 struct pqi_event *event;
3773 int event_index;
3775 event_queue = &ctrl_info->event_queue;
3776 num_events = 0;
3777 oq_ci = event_queue->oq_ci_copy;
3779 while (1) {
3780 oq_pi = readl(event_queue->oq_pi);
3781 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3782 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3783 dev_err(&ctrl_info->pci_dev->dev,
3784 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3785 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3786 return -1;
3789 if (oq_pi == oq_ci)
3790 break;
3792 num_events++;
3793 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3795 event_index = pqi_event_type_to_event_index(response->event_type);
3797 if (event_index >= 0 && response->request_acknowledge) {
3798 event = &ctrl_info->events[event_index];
3799 event->pending = true;
3800 event->event_type = response->event_type;
3801 event->event_id = get_unaligned_le16(&response->event_id);
3802 event->additional_event_id =
3803 get_unaligned_le32(&response->additional_event_id);
3804 if (event->event_type == PQI_EVENT_TYPE_OFA)
3805 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3808 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3811 if (num_events) {
3812 event_queue->oq_ci_copy = oq_ci;
3813 writel(oq_ci, event_queue->oq_ci);
3814 schedule_work(&ctrl_info->event_work);
3817 return num_events;
3820 #define PQI_LEGACY_INTX_MASK 0x1
3822 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3824 u32 intx_mask;
3825 struct pqi_device_registers __iomem *pqi_registers;
3826 volatile void __iomem *register_addr;
3828 pqi_registers = ctrl_info->pqi_registers;
3830 if (enable_intx)
3831 register_addr = &pqi_registers->legacy_intx_mask_clear;
3832 else
3833 register_addr = &pqi_registers->legacy_intx_mask_set;
3835 intx_mask = readl(register_addr);
3836 intx_mask |= PQI_LEGACY_INTX_MASK;
3837 writel(intx_mask, register_addr);
3840 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3841 enum pqi_irq_mode new_mode)
3843 switch (ctrl_info->irq_mode) {
3844 case IRQ_MODE_MSIX:
3845 switch (new_mode) {
3848 case IRQ_MODE_INTX:
3849 pqi_configure_legacy_intx(ctrl_info, true);
3850 sis_enable_intx(ctrl_info);
3856 case IRQ_MODE_INTX:
3857 switch (new_mode) {
3858 case IRQ_MODE_MSIX:
3859 pqi_configure_legacy_intx(ctrl_info, false);
3860 sis_enable_msix(ctrl_info);
3864 case IRQ_MODE_NONE:
3865 pqi_configure_legacy_intx(ctrl_info, false);
3869 case IRQ_MODE_NONE:
3870 switch (new_mode) {
3871 case IRQ_MODE_MSIX:
3872 sis_enable_msix(ctrl_info);
3874 case IRQ_MODE_INTX:
3875 pqi_configure_legacy_intx(ctrl_info, true);
3876 sis_enable_intx(ctrl_info);
3884 ctrl_info->irq_mode = new_mode;
3887 #define PQI_LEGACY_INTX_PENDING 0x1
3889 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3891 bool valid_irq;
3892 u32 intx_status;
3894 switch (ctrl_info->irq_mode) {
3895 case IRQ_MODE_MSIX:
3896 valid_irq = true;
3897 break;
3898 case IRQ_MODE_INTX:
3899 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3900 if (intx_status & PQI_LEGACY_INTX_PENDING)
3901 valid_irq = true;
3902 else
3903 valid_irq = false;
3904 break;
3905 case IRQ_MODE_NONE:
3906 default:
3907 valid_irq = false;
3911 return valid_irq;
3914 static irqreturn_t pqi_irq_handler(int irq, void *data)
3916 struct pqi_ctrl_info *ctrl_info;
3917 struct pqi_queue_group *queue_group;
3918 int num_io_responses_handled;
3919 int num_events_handled;
3921 queue_group = data;
3922 ctrl_info = queue_group->ctrl_info;
3924 if (!pqi_is_valid_irq(ctrl_info))
3925 return IRQ_NONE;
3927 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3928 if (num_io_responses_handled < 0)
3929 goto out;
3931 if (irq == ctrl_info->event_irq) {
3932 num_events_handled = pqi_process_event_intr(ctrl_info);
3933 if (num_events_handled < 0)
3934 goto out;
3936 num_events_handled = 0;
3939 if (num_io_responses_handled + num_events_handled > 0)
3940 atomic_inc(&ctrl_info->num_interrupts);
3942 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3943 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3945 out:
3946 return IRQ_HANDLED;
3949 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3951 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3952 int i;
3953 int rc;
3955 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3957 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3958 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3959 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3960 if (rc) {
3961 dev_err(&pci_dev->dev,
3962 "irq %u init failed with error %d\n",
3963 pci_irq_vector(pci_dev, i), rc);
3964 return rc;
3966 ctrl_info->num_msix_vectors_initialized++;
3969 return 0;
3972 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3974 int i;
3976 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3977 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3978 &ctrl_info->queue_groups[i]);
3980 ctrl_info->num_msix_vectors_initialized = 0;
3983 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3985 int num_vectors_enabled;
3987 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3988 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3989 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3990 if (num_vectors_enabled < 0) {
3991 dev_err(&ctrl_info->pci_dev->dev,
3992 "MSI-X init failed with error %d\n",
3993 num_vectors_enabled);
3994 return num_vectors_enabled;
3997 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3998 ctrl_info->irq_mode = IRQ_MODE_MSIX;
4000 return 0;
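/*
 * Note (illustrative only): pci_alloc_irq_vectors() grants anywhere between
 * the requested minimum (PQI_MIN_MSIX_VECTORS) and maximum (one vector per
 * queue group); PCI_IRQ_AFFINITY asks the PCI core to spread the granted
 * vectors across CPUs. E.g. a request for 16 on a system that can only
 * grant 8 still succeeds, and num_msix_vectors_enabled records the 8.
 */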
4002 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4004 if (ctrl_info->num_msix_vectors_enabled) {
4005 pci_free_irq_vectors(ctrl_info->pci_dev);
4006 ctrl_info->num_msix_vectors_enabled = 0;
4010 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4012 unsigned int i;
4013 size_t alloc_length;
4014 size_t element_array_length_per_iq;
4015 size_t element_array_length_per_oq;
4016 void *element_array;
4017 void __iomem *next_queue_index;
4018 void *aligned_pointer;
4019 unsigned int num_inbound_queues;
4020 unsigned int num_outbound_queues;
4021 unsigned int num_queue_indexes;
4022 struct pqi_queue_group *queue_group;
4024 element_array_length_per_iq =
4025 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4026 ctrl_info->num_elements_per_iq;
4027 element_array_length_per_oq =
4028 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4029 ctrl_info->num_elements_per_oq;
4030 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4031 num_outbound_queues = ctrl_info->num_queue_groups;
4032 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4034 aligned_pointer = NULL;
4036 for (i = 0; i < num_inbound_queues; i++) {
4037 aligned_pointer = PTR_ALIGN(aligned_pointer,
4038 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4039 aligned_pointer += element_array_length_per_iq;
4042 for (i = 0; i < num_outbound_queues; i++) {
4043 aligned_pointer = PTR_ALIGN(aligned_pointer,
4044 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4045 aligned_pointer += element_array_length_per_oq;
4048 aligned_pointer = PTR_ALIGN(aligned_pointer,
4049 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4050 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4051 PQI_EVENT_OQ_ELEMENT_LENGTH;
4053 for (i = 0; i < num_queue_indexes; i++) {
4054 aligned_pointer = PTR_ALIGN(aligned_pointer,
4055 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4056 aligned_pointer += sizeof(pqi_index_t);
4059 alloc_length = (size_t)aligned_pointer +
4060 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4062 alloc_length += PQI_EXTRA_SGL_MEMORY;
4064 ctrl_info->queue_memory_base =
4065 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4066 &ctrl_info->queue_memory_base_dma_handle,
4067 GFP_KERNEL);
4069 if (!ctrl_info->queue_memory_base)
4070 return -ENOMEM;
4072 ctrl_info->queue_memory_length = alloc_length;
4074 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4075 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4077 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4078 queue_group = &ctrl_info->queue_groups[i];
4079 queue_group->iq_element_array[RAID_PATH] = element_array;
4080 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4081 ctrl_info->queue_memory_base_dma_handle +
4082 (element_array - ctrl_info->queue_memory_base);
4083 element_array += element_array_length_per_iq;
4084 element_array = PTR_ALIGN(element_array,
4085 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4086 queue_group->iq_element_array[AIO_PATH] = element_array;
4087 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4088 ctrl_info->queue_memory_base_dma_handle +
4089 (element_array - ctrl_info->queue_memory_base);
4090 element_array += element_array_length_per_iq;
4091 element_array = PTR_ALIGN(element_array,
4092 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4095 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4096 queue_group = &ctrl_info->queue_groups[i];
4097 queue_group->oq_element_array = element_array;
4098 queue_group->oq_element_array_bus_addr =
4099 ctrl_info->queue_memory_base_dma_handle +
4100 (element_array - ctrl_info->queue_memory_base);
4101 element_array += element_array_length_per_oq;
4102 element_array = PTR_ALIGN(element_array,
4103 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4106 ctrl_info->event_queue.oq_element_array = element_array;
4107 ctrl_info->event_queue.oq_element_array_bus_addr =
4108 ctrl_info->queue_memory_base_dma_handle +
4109 (element_array - ctrl_info->queue_memory_base);
4110 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4111 PQI_EVENT_OQ_ELEMENT_LENGTH;
4113 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4114 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4116 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4117 queue_group = &ctrl_info->queue_groups[i];
4118 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4119 queue_group->iq_ci_bus_addr[RAID_PATH] =
4120 ctrl_info->queue_memory_base_dma_handle +
4121 (next_queue_index -
4122 (void __iomem *)ctrl_info->queue_memory_base);
4123 next_queue_index += sizeof(pqi_index_t);
4124 next_queue_index = PTR_ALIGN(next_queue_index,
4125 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4126 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4127 queue_group->iq_ci_bus_addr[AIO_PATH] =
4128 ctrl_info->queue_memory_base_dma_handle +
4129 (next_queue_index -
4130 (void __iomem *)ctrl_info->queue_memory_base);
4131 next_queue_index += sizeof(pqi_index_t);
4132 next_queue_index = PTR_ALIGN(next_queue_index,
4133 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4134 queue_group->oq_pi = next_queue_index;
4135 queue_group->oq_pi_bus_addr =
4136 ctrl_info->queue_memory_base_dma_handle +
4137 (next_queue_index -
4138 (void __iomem *)ctrl_info->queue_memory_base);
4139 next_queue_index += sizeof(pqi_index_t);
4140 next_queue_index = PTR_ALIGN(next_queue_index,
4141 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4144 ctrl_info->event_queue.oq_pi = next_queue_index;
4145 ctrl_info->event_queue.oq_pi_bus_addr =
4146 ctrl_info->queue_memory_base_dma_handle +
4147 (next_queue_index -
4148 (void __iomem *)ctrl_info->queue_memory_base);
4150 return 0;
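/*
 * Worked example (illustrative only, assuming a 64-byte element-array
 * alignment): the first pass runs the PTR_ALIGN() arithmetic from a NULL
 * base, so two 100-byte arrays yield offsets 0 and 128 and alloc_length
 * ends up covering every array plus worst-case padding. The second pass
 * repeats the same steps inside the real dma_alloc_coherent() buffer, and
 * each bus address is derived as dma_handle + (virt - base).
 */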
4153 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4156 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4157 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4160 * Initialize the backpointers to the controller structure in
4161 * each operational queue group structure.
4163 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4164 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4167 * Assign IDs to all operational queues. Note that the IDs
4168 * assigned to operational IQs are independent of the IDs
4169 * assigned to operational OQs.
4171 ctrl_info->event_queue.oq_id = next_oq_id++;
4172 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4173 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4174 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4175 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4179 * Assign MSI-X table entry indexes to all queues. Note that the
4180 * interrupt for the event queue is shared with the first queue group.
4182 ctrl_info->event_queue.int_msg_num = 0;
4183 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4184 ctrl_info->queue_groups[i].int_msg_num = i;
4186 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4187 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4188 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4189 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4190 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4194 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4196 size_t alloc_length;
4197 struct pqi_admin_queues_aligned *admin_queues_aligned;
4198 struct pqi_admin_queues *admin_queues;
4200 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4201 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4203 ctrl_info->admin_queue_memory_base =
4204 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4205 &ctrl_info->admin_queue_memory_base_dma_handle,
4206 GFP_KERNEL);
4208 if (!ctrl_info->admin_queue_memory_base)
4209 return -ENOMEM;
4211 ctrl_info->admin_queue_memory_length = alloc_length;
4213 admin_queues = &ctrl_info->admin_queues;
4214 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4215 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4216 admin_queues->iq_element_array =
4217 &admin_queues_aligned->iq_element_array;
4218 admin_queues->oq_element_array =
4219 &admin_queues_aligned->oq_element_array;
4220 admin_queues->iq_ci =
4221 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4222 admin_queues->oq_pi =
4223 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4225 admin_queues->iq_element_array_bus_addr =
4226 ctrl_info->admin_queue_memory_base_dma_handle +
4227 (admin_queues->iq_element_array -
4228 ctrl_info->admin_queue_memory_base);
4229 admin_queues->oq_element_array_bus_addr =
4230 ctrl_info->admin_queue_memory_base_dma_handle +
4231 (admin_queues->oq_element_array -
4232 ctrl_info->admin_queue_memory_base);
4233 admin_queues->iq_ci_bus_addr =
4234 ctrl_info->admin_queue_memory_base_dma_handle +
4235 ((void __iomem *)admin_queues->iq_ci -
4236 (void __iomem *)ctrl_info->admin_queue_memory_base);
4237 admin_queues->oq_pi_bus_addr =
4238 ctrl_info->admin_queue_memory_base_dma_handle +
4239 ((void __iomem *)admin_queues->oq_pi -
4240 (void __iomem *)ctrl_info->admin_queue_memory_base);
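
/*
 * The allocation above deliberately over-allocates by one full
 * PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT so that PTR_ALIGN can slide the aligned
 * struct forward within the buffer without running past its end; e.g. with
 * 64-byte alignment, a base ending in 0x30 yields an aligned pointer 0x10
 * bytes in, leaving the remaining slack unused.
 */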

#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1

static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	struct pqi_admin_queues *admin_queues;
	unsigned long timeout;
	u8 status;
	u32 reg;

	pqi_registers = ctrl_info->pqi_registers;
	admin_queues = &ctrl_info->admin_queues;

	writeq((u64)admin_queues->iq_element_array_bus_addr,
		&pqi_registers->admin_iq_element_array_addr);
	writeq((u64)admin_queues->oq_element_array_bus_addr,
		&pqi_registers->admin_oq_element_array_addr);
	writeq((u64)admin_queues->iq_ci_bus_addr,
		&pqi_registers->admin_iq_ci_addr);
	writeq((u64)admin_queues->oq_pi_bus_addr,
		&pqi_registers->admin_oq_pi_addr);

	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
		(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
		(admin_queues->int_msg_num << 16);
	writel(reg, &pqi_registers->admin_iq_num_elements);

	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
		&pqi_registers->function_and_status_code);

	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
	while (1) {
		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
	}

	/*
	 * The offset registers are not initialized to the correct
	 * offsets until *after* the create admin queue pair command
	 * completes successfully.
	 */
	admin_queues->iq_pi = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_iq_pi_offset);
	admin_queues->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_oq_ci_offset);

	return 0;
}

static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request)
{
	struct pqi_admin_queues *admin_queues;
	void *next_element;
	pqi_index_t iq_pi;

	admin_queues = &ctrl_info->admin_queues;
	iq_pi = admin_queues->iq_pi_copy;

	next_element = admin_queues->iq_element_array +
		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);

	memcpy(next_element, request, sizeof(*request));

	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
	admin_queues->iq_pi_copy = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, admin_queues->iq_pi);
}
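
/*
 * The producer index above wraps modulo the element count; e.g. with an
 * 8-element admin IQ, iq_pi advances 6 -> 7 -> 0 -> 1 as requests are posted,
 * and the controller consumes elements until its consumer index catches up.
 */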

#define PQI_ADMIN_REQUEST_TIMEOUT_SECS	60

static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_response *response)
{
	struct pqi_admin_queues *admin_queues;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	unsigned long timeout;

	admin_queues = &ctrl_info->admin_queues;
	oq_ci = admin_queues->oq_ci_copy;

	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		oq_pi = readl(admin_queues->oq_pi);
		if (oq_pi != oq_ci)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for admin response\n");
			return -ETIMEDOUT;
		}
		if (!sis_is_firmware_running(ctrl_info))
			return -ENXIO;
		usleep_range(1000, 2000);
	}

	memcpy(response, admin_queues->oq_element_array +
		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));

	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
	admin_queues->oq_ci_copy = oq_ci;
	writel(oq_ci, admin_queues->oq_ci);

	return 0;
}

static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request)
{
	struct pqi_io_request *next;
	void *next_element;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	size_t iu_length;
	unsigned long flags;
	unsigned int num_elements_needed;
	unsigned int num_elements_to_end_of_queue;
	size_t copy_count;
	struct pqi_iu_header *request;

	spin_lock_irqsave(&queue_group->submit_lock[path], flags);

	if (io_request) {
		io_request->queue_group = queue_group;
		list_add_tail(&io_request->request_list_entry,
			&queue_group->request_list[path]);
	}

	iq_pi = queue_group->iq_pi_copy[path];

	list_for_each_entry_safe(io_request, next,
		&queue_group->request_list[path], request_list_entry) {

		request = io_request->iu;

		iu_length = get_unaligned_le16(&request->iu_length) +
			PQI_REQUEST_HEADER_LENGTH;
		num_elements_needed =
			DIV_ROUND_UP(iu_length,
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		iq_ci = readl(queue_group->iq_ci[path]);

		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		put_unaligned_le16(queue_group->oq_id,
			&request->response_queue_id);

		next_element = queue_group->iq_element_array[path] +
			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		num_elements_to_end_of_queue =
			ctrl_info->num_elements_per_iq - iq_pi;

		if (num_elements_needed <= num_elements_to_end_of_queue) {
			memcpy(next_element, request, iu_length);
		} else {
			copy_count = num_elements_to_end_of_queue *
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
			memcpy(next_element, request, copy_count);
			memcpy(queue_group->iq_element_array[path],
				(u8 *)request + copy_count,
				iu_length - copy_count);
		}

		iq_pi = (iq_pi + num_elements_needed) %
			ctrl_info->num_elements_per_iq;

		list_del(&io_request->request_list_entry);
	}

	if (iq_pi != queue_group->iq_pi_copy[path]) {
		queue_group->iq_pi_copy[path] = iq_pi;
		/*
		 * This write notifies the controller that one or more IUs are
		 * available to be processed.
		 */
		writel(iq_pi, queue_group->iq_pi[path]);
	}

	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
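
/*
 * Wrap-around example for the spanned copy above: with 16 elements per IQ,
 * iq_pi == 14, and an IU needing 4 elements, the first 2 elements' worth of
 * the IU is copied into slots 14-15 and the remainder into slots 0-1, after
 * which iq_pi becomes (14 + 4) % 16 == 2.
 */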

#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS	10

static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
	struct completion *wait)
{
	int rc;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
	}

	return rc;
}

static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}

static int pqi_process_raid_io_error_synchronous(
	struct pqi_raid_error_info *error_info)
{
	int rc = -EIO;

	switch (error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		if (error_info->status == SAM_STAT_GOOD)
			rc = 0;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		if (error_info->status == SAM_STAT_GOOD ||
			error_info->status == SAM_STAT_CHECK_CONDITION)
			rc = 0;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		rc = PQI_CMD_STATUS_ABORTED;
		break;
	}

	return rc;
}

static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
{
	return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
}

static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info)
{
	int rc = 0;
	struct pqi_io_request *io_request;
	size_t iu_length;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
		if (down_interruptible(&ctrl_info->sync_request_sem))
			return -ERESTARTSYS;
	} else {
		down(&ctrl_info->sync_request_sem);
	}

	pqi_ctrl_busy(ctrl_info);
	/*
	 * Wait for other admin queue updates such as:
	 * config table changes, OFA memory updates, ...
	 */
	if (pqi_is_blockable_request(request))
		pqi_wait_if_ctrl_blocked(ctrl_info);

	if (pqi_ctrl_offline(ctrl_info)) {
		rc = -ENXIO;
		goto out;
	}

	io_request = pqi_alloc_io_request(ctrl_info);

	put_unaligned_le16(io_request->index,
		&(((struct pqi_raid_path_request *)request)->request_id));

	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
		((struct pqi_raid_path_request *)request)->error_index =
			((struct pqi_raid_path_request *)request)->request_id;

	iu_length = get_unaligned_le16(&request->iu_length) +
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(io_request->iu, request, iu_length);

	io_request->io_complete_callback = pqi_raid_synchronous_complete;
	io_request->context = &wait;

	pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	pqi_wait_for_completion_io(ctrl_info, &wait);

	if (error_info) {
		if (io_request->error_info)
			memcpy(error_info, io_request->error_info, sizeof(*error_info));
		else
			memset(error_info, 0, sizeof(*error_info));
	} else if (rc == 0 && io_request->error_info) {
		rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
	}

	pqi_free_io_request(io_request);

out:
	pqi_ctrl_unbusy(ctrl_info);
	up(&ctrl_info->sync_request_sem);

	return rc;
}

static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{
	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;

	if (get_unaligned_le16(&response->header.iu_length) !=
		PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;

	if (response->function_code != expected_function_code)
		return -EINVAL;

	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{
	int rc;

	pqi_submit_admin_request(ctrl_info, request);

	rc = pqi_poll_for_admin_response(ctrl_info, response);

	if (rc == 0)
		rc = pqi_validate_admin_response(response, request->function_code);

	return rc;
}

static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);

	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
			&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}
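
/*
 * The element lengths in the capability IU are encoded in 16-byte units,
 * hence the "* 16" conversions above; e.g. a reported value of 8 means
 * 128-byte queue elements. The create-queue requests later divide by 16 to
 * convert back to the wire encoding.
 */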

static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->max_iq_element_length <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_iq_element_length,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_oq_element_length <
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. outbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_oq_element_length,
			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_inbound_iu_length_per_firmware <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound IU length of %u is less than the min. required length of %d\n",
			ctrl_info->max_inbound_iu_length_per_firmware,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (!ctrl_info->inbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller does not support inbound spanning\n");
		return -EINVAL;
	}

	if (ctrl_info->outbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller supports outbound spanning but this driver does not\n");
		return -EINVAL;
	}

	return 0;
}

static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}

static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
	unsigned int group_number)
{
	int rc;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	queue_group = &ctrl_info->queue_groups[group_number];

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64((u64)queue_group->
		iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		return rc;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path. By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		return rc;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		return rc;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}

static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info, i);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}

#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
	struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)

static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
	bool enable_events)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_event_descriptor *event_descriptor;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;

	for (i = 0; i < event_config->num_event_descriptors; i++) {
		event_descriptor = &event_config->descriptors[i];
		if (enable_events &&
			pqi_is_supported_event(event_descriptor->event_type))
			put_unaligned_le16(ctrl_info->event_queue.oq_id,
				&event_descriptor->oq_id);
		else
			put_unaligned_le16(0, &event_descriptor->oq_id);
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_TO_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_TO_DEVICE);

out:
	kfree(event_config);

	return rc;
}

static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, true);
}

static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}

static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
		ctrl_info->error_buffer_length,
		&ctrl_info->error_buffer_dma_handle,
		GFP_KERNEL);
	if (!ctrl_info->error_buffer)
		return -ENOMEM;

	return 0;
}

static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);

		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);

		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}

/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */

static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	if (reset_devices)
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE_KDUMP);
	else
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE);

	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;

	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);

	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;

	ctrl_info->sg_chain_buffer_length =
		(max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
		PQI_EXTRA_SGL_MEMORY;
	ctrl_info->sg_tablesize = max_sg_entries;
	ctrl_info->max_sectors = max_transfer_size / 512;
}
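
/*
 * Sizing example (a sketch, assuming 4 KiB pages and a 1 MiB max transfer
 * size): max_sg_entries = 1 MiB / 4 KiB + 1 = 257, and the usable transfer
 * size is then rounded back down to (257 - 1) * 4 KiB = 1 MiB, i.e. 2048
 * sectors reported via max_sectors.
 */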

static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	if (reset_devices) {
		num_queue_groups = 1;
	} else {
		int num_cpus;
		int max_queue_groups;

		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
			ctrl_info->max_outbound_queues - 1);
		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

		num_cpus = num_online_cpus();
		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
		num_queue_groups = min(num_queue_groups, max_queue_groups);
	}

	ctrl_info->num_queue_groups = num_queue_groups;
	ctrl_info->max_hw_queue_index = num_queue_groups - 1;

	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
	ctrl_info->max_inbound_iu_length =
		(ctrl_info->max_inbound_iu_length_per_firmware /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

	num_elements_per_iq =
		(ctrl_info->max_inbound_iu_length /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* Add one because one element in each queue is unusable. */
	num_elements_per_iq++;

	num_elements_per_iq = min(num_elements_per_iq,
		ctrl_info->max_elements_per_iq);

	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
	num_elements_per_oq = min(num_elements_per_oq,
		ctrl_info->max_elements_per_oq);

	ctrl_info->num_elements_per_iq = num_elements_per_iq;
	ctrl_info->num_elements_per_oq = num_elements_per_oq;

	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;

	ctrl_info->max_sg_per_r56_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
}
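
/*
 * Element-count example (a sketch, assuming a 16 KiB max inbound IU and
 * 128-byte IQ elements): num_elements_per_iq = 16384 / 128 + 1 = 129, and
 * num_elements_per_oq = (129 - 1) * 2 + 1 = 257 -- each OQ is sized to absorb
 * completions from both IQs of its queue group, plus the one unusable
 * element.
 */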

static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
	struct scatterlist *sg)
{
	u64 address = (u64)sg_dma_address(sg);
	unsigned int length = sg_dma_len(sg);

	put_unaligned_le64(address, &sg_descriptor->address);
	put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}

static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
	struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
	int max_sg_per_iu, bool *chained)
{
	int i;
	unsigned int num_sg_in_iu;

	*chained = false;
	i = 0;
	num_sg_in_iu = 0;
	max_sg_per_iu--;	/* Subtract 1 to leave room for chain marker. */

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!*chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
			*chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return num_sg_in_iu;
}
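
/*
 * Chaining example: with max_sg_per_iu == 4 (so 3 usable embedded slots after
 * reserving the chain marker) and sg_count == 10, the first 3 descriptors
 * land in the IU, the 4th embedded slot becomes a CISS_SG_CHAIN descriptor
 * pointing at the DMA-able chain buffer, and the remaining 7 descriptors are
 * written there, the last one flagged CISS_SG_LAST.
 */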

static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;

	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
		ctrl_info->max_sg_per_iu, &chained);

	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}

static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;

	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
		ctrl_info->max_sg_per_iu, &chained);

	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}

static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count != 0) {
		sg = scsi_sglist(scmd);
		sg_descriptor = request->sg_descriptors;

		num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
			ctrl_info->max_sg_per_r56_iu, &chained);

		request->partial = chained;
		iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
	}

	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}

static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;

	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
		ctrl_info->max_sg_per_iu, &chained);

	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}

static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}

static int pqi_raid_submit_scsi_cmd_with_io_request(
	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_raid_path_request *request;

	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	request = io_request->iu;
	memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));

	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
	memcpy(request->cdb, scmd->cmnd, cdb_length);

	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}

static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct pqi_io_request *io_request;

	io_request = pqi_alloc_io_request(ctrl_info);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}

static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	if (!io_request->raid_bypass)
		return false;

	scmd = io_request->scmd;
	if ((scmd->result & 0xff) == SAM_STAT_GOOD)
		return false;
	if (host_byte(scmd->result) == DID_NO_CONNECT)
		return false;

	device = scmd->device->hostdata;
	if (pqi_device_offline(device) || pqi_device_in_remove(device))
		return false;

	ctrl_info = shost_to_hba(scmd->device->host);
	if (pqi_ctrl_offline(ctrl_info))
		return false;

	return true;
}

static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
		set_host_byte(scmd, DID_IMM_RETRY);
		scmd->SCp.this_residual++;
	}

	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}

static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	bool io_high_prio;
	int priority_class;

	io_high_prio = false;

	if (device->ncq_prio_enable) {
		priority_class =
			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
		if (priority_class == IOPRIO_CLASS_RT) {
			/* Set NCQ priority for read/write commands. */
			switch (scmd->cmnd[0]) {
			case WRITE_16:
			case READ_16:
			case WRITE_12:
			case READ_12:
			case WRITE_10:
			case READ_10:
			case WRITE_6:
			case READ_6:
				io_high_prio = true;
				break;
			}
		}
	}

	return io_high_prio;
}
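
/*
 * Usage note (an illustration, not part of this file): an application can
 * mark its I/O as realtime class with e.g. "ionice -c 1 <cmd>" or the
 * ioprio_set() syscall using IOPRIO_CLASS_RT, which is what req_get_ioprio()
 * reports here once NCQ priority has been enabled for the device.
 */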

static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	bool io_high_prio;

	io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);

	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL,
		false, io_high_prio);
}

static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass,
	bool io_high_prio)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;
	io_request->raid_bypass = raid_bypass;

	request = io_request->iu;
	memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
	put_unaligned_le32(aio_handle, &request->nexus_id);
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->command_priority = io_high_prio;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	if (cdb_length > sizeof(request->cdb))
		cdb_length = sizeof(request->cdb);
	request->cdb_length = cdb_length;
	memcpy(request->cdb, cdb, cdb_length);

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}

	if (encryption_info) {
		request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}

static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_r1_path_request *r1_request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;
	io_request->raid_bypass = true;

	r1_request = io_request->iu;
	memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));

	r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
	r1_request->num_drives = rmd->num_it_nexus_entries;
	put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
	put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
	if (rmd->num_it_nexus_entries == 3)
		put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);

	put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
	r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &r1_request->request_id);
	r1_request->error_index = r1_request->request_id;
	if (rmd->cdb_length > sizeof(r1_request->cdb))
		rmd->cdb_length = sizeof(r1_request->cdb);
	r1_request->cdb_length = rmd->cdb_length;
	memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);

	/* The direction is always write. */
	r1_request->data_direction = SOP_READ_FLAG;

	if (encryption_info) {
		r1_request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&r1_request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&r1_request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&r1_request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}

static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_r56_path_request *r56_request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;
	io_request->raid_bypass = true;

	r56_request = io_request->iu;
	memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));

	if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
	else
		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;

	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
	put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
	put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
	if (rmd->raid_level == SA_RAID_6) {
		put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
		r56_request->xor_multiplier = rmd->xor_mult;
	}
	put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
	r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le64(rmd->row, &r56_request->row);

	put_unaligned_le16(io_request->index, &r56_request->request_id);
	r56_request->error_index = r56_request->request_id;

	if (rmd->cdb_length > sizeof(r56_request->cdb))
		rmd->cdb_length = sizeof(r56_request->cdb);
	r56_request->cdb_length = rmd->cdb_length;
	memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);

	/* The direction is always write. */
	r56_request->data_direction = SOP_READ_FLAG;

	if (encryption_info) {
		r56_request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&r56_request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&r56_request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&r56_request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}

static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
	u16 hw_queue;

	hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
	if (hw_queue > ctrl_info->max_hw_queue_index)
		hw_queue = 0;

	return hw_queue;
}

static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
{
	if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
		return false;

	return scmd->SCp.this_residual == 0;
}

/*
 * This function gets called just before we hand the completed SCSI request
 * back to the SML.
 */
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
	struct pqi_scsi_dev *device;

	if (!scmd->device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		return;
	}

	device = scmd->device->hostdata;
	if (!device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		return;
	}

	atomic_dec(&device->scsi_cmds_outstanding);
}

static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
	u32 oldest_jiffies;
	u8 lru_index;
	int i;
	int rc;
	struct pqi_scsi_dev *device;
	struct pqi_stream_data *pqi_stream_data;
	struct pqi_scsi_dev_raid_map_data rmd;

	if (!ctrl_info->enable_stream_detection)
		return false;

	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
	if (rc)
		return false;

	/* Check writes only. */
	if (!rmd.is_write)
		return false;

	device = scmd->device->hostdata;

	/* Check for RAID 5/6 streams. */
	if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
		return false;

	/*
	 * If controller does not support AIO RAID{5,6} writes, need to send
	 * requests down non-AIO path.
	 */
	if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
		(device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
		return true;

	lru_index = 0;
	oldest_jiffies = INT_MAX;
	for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
		pqi_stream_data = &device->stream_data[i];
		/*
		 * Check for adjacent request or request is within
		 * the previous request.
		 */
		if ((pqi_stream_data->next_lba &&
			rmd.first_block >= pqi_stream_data->next_lba) &&
			rmd.first_block <= pqi_stream_data->next_lba +
				rmd.block_cnt) {
			pqi_stream_data->next_lba = rmd.first_block +
				rmd.block_cnt;
			pqi_stream_data->last_accessed = jiffies;
			return true;
		}

		/* Unused entry. */
		if (pqi_stream_data->last_accessed == 0) {
			lru_index = i;
			break;
		}

		/* Find entry with oldest last accessed time. */
		if (pqi_stream_data->last_accessed <= oldest_jiffies) {
			oldest_jiffies = pqi_stream_data->last_accessed;
			lru_index = i;
		}
	}

	/* Set LRU entry. */
	pqi_stream_data = &device->stream_data[lru_index];
	pqi_stream_data->last_accessed = jiffies;
	pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;

	return false;
}
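
/*
 * Example of the stream heuristic above: after a 256-block write at LBA 1000,
 * next_lba is 1256; a follow-up write whose first block lands anywhere in
 * [1256, 1256 + its own block count] is treated as part of the same
 * sequential stream and kept on the RAID (full-stripe write) path instead of
 * being bypassed.
 */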

static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hw_queue;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;

	device = scmd->device->hostdata;

	if (!device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	atomic_inc(&device->scsi_cmds_outstanding);

	ctrl_info = shost_to_hba(shost);

	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	if (pqi_ctrl_blocked(ctrl_info)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	scmd->result = 0;

	hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
	queue_group = &ctrl_info->queue_groups[hw_queue];

	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		if (device->raid_bypass_enabled &&
			pqi_is_bypass_eligible_request(scmd) &&
			!pqi_is_parity_write_stream(ctrl_info, scmd)) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
				raid_bypassed = true;
				atomic_inc(&device->raid_bypass_cnt);
			}
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
	} else {
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
	}

out:
	if (rc)
		atomic_dec(&device->scsi_cmds_outstanding);

	return rc;
}

static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	unsigned int path;
	unsigned long flags;
	unsigned int queued_io_count;
	struct pqi_queue_group *queue_group;
	struct pqi_io_request *io_request;

	queued_io_count = 0;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(&queue_group->submit_lock[path], flags);
			list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
				queued_io_count++;
			spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
		}
	}

	return queued_io_count;
}

static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	unsigned int path;
	unsigned int nonempty_inbound_queue_count;
	struct pqi_queue_group *queue_group;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;

	nonempty_inbound_queue_count = 0;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		for (path = 0; path < 2; path++) {
			iq_pi = queue_group->iq_pi_copy[path];
			iq_ci = readl(queue_group->iq_ci[path]);
			if (iq_ci != iq_pi)
				nonempty_inbound_queue_count++;
		}
	}

	return nonempty_inbound_queue_count;
}

#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS	10

static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	unsigned int queued_io_count;
	unsigned int nonempty_inbound_queue_count;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (1) {
		queued_io_count = pqi_queued_io_count(ctrl_info);
		nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
		if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
			break;
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
			displayed_warning = true;
			warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"queued I/O drained after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);

	return 0;
}

static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *scsi_device;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {

				scmd = io_request->scmd;
				if (!scmd)
					continue;

				scsi_device = scmd->device->hostdata;
				if (scsi_device != device)
					continue;

				list_del(&io_request->request_list_entry);
				set_host_byte(scmd, DID_RESET);
				pqi_free_io_request(io_request);
				scsi_dma_unmap(scmd);
				pqi_scsi_done(scmd);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}

#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS	10

static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_msecs)
{
	int cmds_outstanding;
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	unsigned long msecs_waiting;

	start_jiffies = jiffies;
	warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
		if (msecs_waiting >= timeout_msecs) {
			dev_err(&ctrl_info->pci_dev->dev,
				"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
				ctrl_info->scsi_host->host_no, device->bus, device->target,
				device->lun, msecs_waiting / 1000, cmds_outstanding);
			return -ETIMEDOUT;
		}
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
				ctrl_info->scsi_host->host_no, device->bus, device->target,
				device->lun, msecs_waiting / 1000, cmds_outstanding);
			warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	return 0;
}

static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}

#define PQI_LUN_RESET_POLL_COMPLETION_SECS	10

static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct completion *wait)
{
	int rc;
	unsigned int wait_secs;
	int cmds_outstanding;

	wait_secs = 0;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}

		wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
		cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
		dev_warn(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
			ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
	}

	return rc;
}

#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS	30

static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct pqi_task_management_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_lun_reset_complete;
	io_request->context = &wait;

	request = io_request->iu;
	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(io_request->index, &request->request_id);
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));
	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
	if (ctrl_info->tmf_iu_timeout_supported)
		put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);

	pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);

	return rc;
}

#define PQI_LUN_RESET_RETRIES				3
#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS		(10 * 1000)
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS		(10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS	(2 * 60 * 1000)

static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int reset_rc;
	int wait_rc;
	unsigned int retries;
	unsigned long timeout_msecs;

	for (retries = 0;;) {
		reset_rc = pqi_lun_reset(ctrl_info, device);
		if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
			break;
		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
	}

	timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
		PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;

	wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
	if (wait_rc && reset_rc == 0)
		reset_rc = wait_rc;

	return reset_rc == 0 ? SUCCESS : FAILED;
}
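
/*
 * Timing example for the retry policy above: a LUN reset that keeps failing
 * is attempted up to 4 times in total (1 + PQI_LUN_RESET_RETRIES) with
 * 10-second pauses in between, after which pending I/O is given 2 minutes to
 * drain on failure versus 10 minutes following a successful reset.
 */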

static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_io_queued_for_device(ctrl_info, device);
	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
	if (rc)
		rc = FAILED;
	else
		rc = pqi_lun_reset_with_retries(ctrl_info, device);
	pqi_ctrl_unblock_requests(ctrl_info);

	return rc;
}
6283 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6286 struct Scsi_Host *shost;
6287 struct pqi_ctrl_info *ctrl_info;
6288 struct pqi_scsi_dev *device;
6290 shost = scmd->device->host;
6291 ctrl_info = shost_to_hba(shost);
6292 device = scmd->device->hostdata;
6294 mutex_lock(&ctrl_info->lun_reset_mutex);
6296 dev_err(&ctrl_info->pci_dev->dev,
6297 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6299 device->bus, device->target, device->lun,
6300 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6302 pqi_check_ctrl_health(ctrl_info);
6303 if (pqi_ctrl_offline(ctrl_info))
6304 rc = FAILED;
6305 else
6306 rc = pqi_device_reset(ctrl_info, device);
6308 dev_err(&ctrl_info->pci_dev->dev,
6309 "reset of scsi %d:%d:%d:%d: %s\n",
6310 shost->host_no, device->bus, device->target, device->lun,
6311 rc == SUCCESS ? "SUCCESS" : "FAILED");
6313 mutex_unlock(&ctrl_info->lun_reset_mutex);
6318 static int pqi_slave_alloc(struct scsi_device *sdev)
6320 struct pqi_scsi_dev *device;
6321 unsigned long flags;
6322 struct pqi_ctrl_info *ctrl_info;
6323 struct scsi_target *starget;
6324 struct sas_rphy *rphy;
6326 ctrl_info = shost_to_hba(sdev->host);
6328 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6330 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6331 starget = scsi_target(sdev);
6332 rphy = target_to_rphy(starget);
6333 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6334 if (device) {
6335 if (device->target_lun_valid) {
6336 device->ignore_device = true;
6337 } else {
6338 device->target = sdev_id(sdev);
6339 device->lun = sdev->lun;
6340 device->target_lun_valid = true;
6344 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6345 sdev_id(sdev), sdev->lun);
6349 sdev->hostdata = device;
6350 device->sdev = sdev;
6351 if (device->queue_depth) {
6352 device->advertised_queue_depth = device->queue_depth;
6353 scsi_change_queue_depth(sdev,
6354 device->advertised_queue_depth);
6356 if (pqi_is_logical_device(device)) {
6357 pqi_disable_write_same(sdev);
6358 } else {
6359 sdev->allow_restart = 1;
6360 if (device->device_type == SA_DEVICE_TYPE_NVME)
6361 pqi_disable_write_same(sdev);
6365 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6370 static int pqi_map_queues(struct Scsi_Host *shost)
6372 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6374 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6375 ctrl_info->pci_dev, 0);
6378 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6380 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6383 static int pqi_slave_configure(struct scsi_device *sdev)
6386 struct pqi_scsi_dev *device;
6388 device = sdev->hostdata;
6389 device->devtype = sdev->type;
6391 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6392 rc = -ENXIO;
6393 device->ignore_device = false;
6399 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6401 struct pci_dev *pci_dev;
6402 u32 subsystem_vendor;
6403 u32 subsystem_device;
6404 cciss_pci_info_struct pciinfo;
6409 pci_dev = ctrl_info->pci_dev;
6411 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6412 pciinfo.bus = pci_dev->bus->number;
6413 pciinfo.dev_fn = pci_dev->devfn;
6414 subsystem_vendor = pci_dev->subsystem_vendor;
6415 subsystem_device = pci_dev->subsystem_device;
6416 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
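/*
 * board_id layout: the PCI subsystem device ID occupies the high 16 bits
 * and the subsystem vendor ID the low 16. With illustrative IDs of
 * subsystem_vendor = 0x9005 and subsystem_device = 0x028f, the packed
 * board_id is 0x028f9005.
 */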
6418 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6424 static int pqi_getdrivver_ioctl(void __user *arg)
6431 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6432 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
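/*
 * The packed version layout implied by the shifts above is
 * major:4 minor:4 release:8 revision:16. As a worked example, a
 * 2.1.12-55 version packs to (2 << 28) | (1 << 24) | (12 << 16) | 55
 * = 0x210c0037.
 */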
6434 if (copy_to_user(arg, &version, sizeof(version)))
6440 struct ciss_error_info {
6443 size_t sense_data_length;
6446 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6447 struct ciss_error_info *ciss_error_info)
6449 int ciss_cmd_status;
6450 size_t sense_data_length;
6452 switch (pqi_error_info->data_out_result) {
6453 case PQI_DATA_IN_OUT_GOOD:
6454 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6456 case PQI_DATA_IN_OUT_UNDERFLOW:
6457 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6459 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6460 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6462 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6463 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6464 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6465 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6466 case PQI_DATA_IN_OUT_ERROR:
6467 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6469 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6470 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6471 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6472 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6473 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6474 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6475 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6476 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6477 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6478 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6479 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6481 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6482 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6484 case PQI_DATA_IN_OUT_ABORTED:
6485 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6487 case PQI_DATA_IN_OUT_TIMEOUT:
6488 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6491 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6495 sense_data_length =
6496 get_unaligned_le16(&pqi_error_info->sense_data_length);
6497 if (sense_data_length == 0)
6498 sense_data_length =
6499 get_unaligned_le16(&pqi_error_info->response_data_length);
6500 if (sense_data_length)
6501 if (sense_data_length > sizeof(pqi_error_info->data))
6502 sense_data_length = sizeof(pqi_error_info->data);
6504 ciss_error_info->scsi_status = pqi_error_info->status;
6505 ciss_error_info->command_status = ciss_cmd_status;
6506 ciss_error_info->sense_data_length = sense_data_length;
6509 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6512 char *kernel_buffer = NULL;
6514 size_t sense_data_length;
6515 IOCTL_Command_struct iocommand;
6516 struct pqi_raid_path_request request;
6517 struct pqi_raid_error_info pqi_error_info;
6518 struct ciss_error_info ciss_error_info;
6520 if (pqi_ctrl_offline(ctrl_info))
6522 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6526 if (!capable(CAP_SYS_RAWIO))
6528 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6530 if (iocommand.buf_size < 1 &&
6531 iocommand.Request.Type.Direction != XFER_NONE)
6533 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6535 if (iocommand.Request.Type.Type != TYPE_CMD)
6538 switch (iocommand.Request.Type.Direction) {
6539 case XFER_NONE:
6540 case XFER_WRITE:
6541 case XFER_READ:
6542 case XFER_READ | XFER_WRITE:
6543 break;
6544 default:
6545 return -EINVAL;
6546 }
6548 if (iocommand.buf_size > 0) {
6549 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6550 if (!kernel_buffer)
6551 return -ENOMEM;
6552 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6553 if (copy_from_user(kernel_buffer, iocommand.buf,
6554 iocommand.buf_size)) {
6555 rc = -EFAULT;
6556 goto out;
6557 }
6558 } else {
6559 memset(kernel_buffer, 0, iocommand.buf_size);
6563 memset(&request, 0, sizeof(request));
6565 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6566 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6567 PQI_REQUEST_HEADER_LENGTH;
6568 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6569 sizeof(request.lun_number));
6570 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6571 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6573 switch (iocommand.Request.Type.Direction) {
6574 case XFER_NONE:
6575 request.data_direction = SOP_NO_DIRECTION_FLAG;
6576 break;
6577 case XFER_WRITE:
6578 request.data_direction = SOP_WRITE_FLAG;
6579 break;
6580 case XFER_READ:
6581 request.data_direction = SOP_READ_FLAG;
6582 break;
6583 case XFER_READ | XFER_WRITE:
6584 request.data_direction = SOP_BIDIRECTIONAL;
6585 break;
6586 }
6588 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6590 if (iocommand.buf_size > 0) {
6591 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6593 rc = pqi_map_single(ctrl_info->pci_dev,
6594 &request.sg_descriptors[0], kernel_buffer,
6595 iocommand.buf_size, DMA_BIDIRECTIONAL);
6596 if (rc)
6597 goto out;
6599 iu_length += sizeof(request.sg_descriptors[0]);
6602 put_unaligned_le16(iu_length, &request.header.iu_length);
6604 if (ctrl_info->raid_iu_timeout_supported)
6605 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6607 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6608 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6610 if (iocommand.buf_size > 0)
6611 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6612 DMA_BIDIRECTIONAL);
6614 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6616 if (rc == 0) {
6617 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6618 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6619 iocommand.error_info.CommandStatus =
6620 ciss_error_info.command_status;
6621 sense_data_length = ciss_error_info.sense_data_length;
6622 if (sense_data_length) {
6623 if (sense_data_length >
6624 sizeof(iocommand.error_info.SenseInfo))
6625 sense_data_length =
6626 sizeof(iocommand.error_info.SenseInfo);
6627 memcpy(iocommand.error_info.SenseInfo,
6628 pqi_error_info.data, sense_data_length);
6629 iocommand.error_info.SenseLen = sense_data_length;
6633 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6634 rc = -EFAULT;
6635 goto out;
6638 if (rc == 0 && iocommand.buf_size > 0 &&
6639 (iocommand.Request.Type.Direction & XFER_READ)) {
6640 if (copy_to_user(iocommand.buf, kernel_buffer,
6641 iocommand.buf_size)) {
6642 rc = -EFAULT;
6646 out:
6647 kfree(kernel_buffer);
6652 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6656 struct pqi_ctrl_info *ctrl_info;
6658 ctrl_info = shost_to_hba(sdev->host);
6661 case CCISS_DEREGDISK:
6662 case CCISS_REGNEWDISK:
6663 case CCISS_REGNEWD:
6664 rc = pqi_scan_scsi_devices(ctrl_info);
6665 break;
6666 case CCISS_GETPCIINFO:
6667 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6668 break;
6669 case CCISS_GETDRIVVER:
6670 rc = pqi_getdrivver_ioctl(arg);
6671 break;
6672 case CCISS_PASSTHRU:
6673 rc = pqi_passthru_ioctl(ctrl_info, arg);
6674 break;
6675 default:
6676 rc = -EINVAL;
6677 break;
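/*
 * Minimal userspace sketch of exercising one of these ioctls
 * (illustrative only: the device node is an assumption and error
 * handling is omitted):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/cciss_ioctl.h>
 *
 *	int fd = open("/dev/sda", O_RDWR);	// any smartpqi-attached LUN
 *	cciss_pci_info_struct pci_info;
 *
 *	if (ioctl(fd, CCISS_GETPCIINFO, &pci_info) == 0)
 *		printf("board id 0x%x\n", pci_info.board_id);
 */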
6683 static ssize_t pqi_firmware_version_show(struct device *dev,
6684 struct device_attribute *attr, char *buffer)
6686 struct Scsi_Host *shost;
6687 struct pqi_ctrl_info *ctrl_info;
6689 shost = class_to_shost(dev);
6690 ctrl_info = shost_to_hba(shost);
6692 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6695 static ssize_t pqi_driver_version_show(struct device *dev,
6696 struct device_attribute *attr, char *buffer)
6698 return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6701 static ssize_t pqi_serial_number_show(struct device *dev,
6702 struct device_attribute *attr, char *buffer)
6704 struct Scsi_Host *shost;
6705 struct pqi_ctrl_info *ctrl_info;
6707 shost = class_to_shost(dev);
6708 ctrl_info = shost_to_hba(shost);
6710 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6713 static ssize_t pqi_model_show(struct device *dev,
6714 struct device_attribute *attr, char *buffer)
6716 struct Scsi_Host *shost;
6717 struct pqi_ctrl_info *ctrl_info;
6719 shost = class_to_shost(dev);
6720 ctrl_info = shost_to_hba(shost);
6722 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6725 static ssize_t pqi_vendor_show(struct device *dev,
6726 struct device_attribute *attr, char *buffer)
6728 struct Scsi_Host *shost;
6729 struct pqi_ctrl_info *ctrl_info;
6731 shost = class_to_shost(dev);
6732 ctrl_info = shost_to_hba(shost);
6734 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6737 static ssize_t pqi_host_rescan_store(struct device *dev,
6738 struct device_attribute *attr, const char *buffer, size_t count)
6740 struct Scsi_Host *shost = class_to_shost(dev);
6742 pqi_scan_start(shost);
6747 static ssize_t pqi_lockup_action_show(struct device *dev,
6748 struct device_attribute *attr, char *buffer)
6753 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6754 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6755 count += scnprintf(buffer + count, PAGE_SIZE - count,
6756 "[%s] ", pqi_lockup_actions[i].name);
6757 else
6758 count += scnprintf(buffer + count, PAGE_SIZE - count,
6759 "%s ", pqi_lockup_actions[i].name);
6762 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6767 static ssize_t pqi_lockup_action_store(struct device *dev,
6768 struct device_attribute *attr, const char *buffer, size_t count)
6772 char action_name_buffer[32];
6774 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6775 action_name = strstrip(action_name_buffer);
6777 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6778 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6779 pqi_lockup_action = pqi_lockup_actions[i].action;
6787 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6788 struct device_attribute *attr, char *buffer)
6790 struct Scsi_Host *shost = class_to_shost(dev);
6791 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6793 return scnprintf(buffer, 10, "%x\n",
6794 ctrl_info->enable_stream_detection);
6797 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6798 struct device_attribute *attr, const char *buffer, size_t count)
6800 struct Scsi_Host *shost = class_to_shost(dev);
6801 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6802 u8 set_stream_detection = 0;
6804 if (kstrtou8(buffer, 0, &set_stream_detection))
6807 if (set_stream_detection > 0)
6808 set_stream_detection = 1;
6810 ctrl_info->enable_stream_detection = set_stream_detection;
6815 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6816 struct device_attribute *attr, char *buffer)
6818 struct Scsi_Host *shost = class_to_shost(dev);
6819 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6821 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6824 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6825 struct device_attribute *attr, const char *buffer, size_t count)
6827 struct Scsi_Host *shost = class_to_shost(dev);
6828 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6829 u8 set_r5_writes = 0;
6831 if (kstrtou8(buffer, 0, &set_r5_writes))
6834 if (set_r5_writes > 0)
6835 set_r5_writes = 1;
6837 ctrl_info->enable_r5_writes = set_r5_writes;
6842 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6843 struct device_attribute *attr, char *buffer)
6845 struct Scsi_Host *shost = class_to_shost(dev);
6846 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6848 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6851 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6852 struct device_attribute *attr, const char *buffer, size_t count)
6854 struct Scsi_Host *shost = class_to_shost(dev);
6855 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6856 u8 set_r6_writes = 0;
6858 if (kstrtou8(buffer, 0, &set_r6_writes))
6861 if (set_r6_writes > 0)
6862 set_r6_writes = 1;
6864 ctrl_info->enable_r6_writes = set_r6_writes;
6869 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6870 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6871 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6872 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6873 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6874 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6875 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6876 pqi_lockup_action_store);
6877 static DEVICE_ATTR(enable_stream_detection, 0644,
6878 pqi_host_enable_stream_detection_show,
6879 pqi_host_enable_stream_detection_store);
6880 static DEVICE_ATTR(enable_r5_writes, 0644,
6881 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6882 static DEVICE_ATTR(enable_r6_writes, 0644,
6883 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6885 static struct attribute *pqi_shost_attrs[] = {
6886 &dev_attr_driver_version.attr,
6887 &dev_attr_firmware_version.attr,
6888 &dev_attr_model.attr,
6889 &dev_attr_serial_number.attr,
6890 &dev_attr_vendor.attr,
6891 &dev_attr_rescan.attr,
6892 &dev_attr_lockup_action.attr,
6893 &dev_attr_enable_stream_detection.attr,
6894 &dev_attr_enable_r5_writes.attr,
6895 &dev_attr_enable_r6_writes.attr,
6899 ATTRIBUTE_GROUPS(pqi_shost);
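/*
 * These attributes appear under /sys/class/scsi_host/host<N>/. For
 * example, reading lockup_action lists the available actions with the
 * current one bracketed, and writing one of the names (none, reboot or
 * panic in mainline) selects what pqi_perform_lockup_action() does when
 * a controller lockup is detected.
 */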
6901 static ssize_t pqi_unique_id_show(struct device *dev,
6902 struct device_attribute *attr, char *buffer)
6904 struct pqi_ctrl_info *ctrl_info;
6905 struct scsi_device *sdev;
6906 struct pqi_scsi_dev *device;
6907 unsigned long flags;
6910 sdev = to_scsi_device(dev);
6911 ctrl_info = shost_to_hba(sdev->host);
6913 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6915 device = sdev->hostdata;
6917 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6921 if (device->is_physical_device)
6922 memcpy(unique_id, device->wwid, sizeof(device->wwid));
6924 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6926 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6928 return scnprintf(buffer, PAGE_SIZE,
6929 "%02X%02X%02X%02X%02X%02X%02X%02X"
6930 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
6931 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6932 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6933 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6934 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6937 static ssize_t pqi_lunid_show(struct device *dev,
6938 struct device_attribute *attr, char *buffer)
6940 struct pqi_ctrl_info *ctrl_info;
6941 struct scsi_device *sdev;
6942 struct pqi_scsi_dev *device;
6943 unsigned long flags;
6946 sdev = to_scsi_device(dev);
6947 ctrl_info = shost_to_hba(sdev->host);
6949 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6951 device = sdev->hostdata;
6953 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6957 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6959 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6961 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
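/*
 * %8phN prints the eight scsi3addr bytes as contiguous hex with no
 * separators; e.g. an address of 00 40 00 00 00 00 00 00 reads back
 * as "0x0040000000000000".
 */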
6966 static ssize_t pqi_path_info_show(struct device *dev,
6967 struct device_attribute *attr, char *buf)
6969 struct pqi_ctrl_info *ctrl_info;
6970 struct scsi_device *sdev;
6971 struct pqi_scsi_dev *device;
6972 unsigned long flags;
6979 u8 phys_connector[2];
6981 sdev = to_scsi_device(dev);
6982 ctrl_info = shost_to_hba(sdev->host);
6984 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6986 device = sdev->hostdata;
6988 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6993 for (i = 0; i < MAX_PATHS; i++) {
6994 path_map_index = 1 << i;
6995 if (i == device->active_path_index)
6996 active = "Active";
6997 else if (device->path_map & path_map_index)
6998 active = "Inactive";
6999 else
7000 continue;
7002 output_len += scnprintf(buf + output_len,
7003 PAGE_SIZE - output_len,
7004 "[%d:%d:%d:%d] %20.20s ",
7005 ctrl_info->scsi_host->host_no,
7006 device->bus, device->target,
7008 scsi_device_type(device->devtype));
7010 if (device->devtype == TYPE_RAID ||
7011 pqi_is_logical_device(device))
7014 memcpy(&phys_connector, &device->phys_connector[i],
7015 sizeof(phys_connector));
7016 if (phys_connector[0] < '0')
7017 phys_connector[0] = '0';
7018 if (phys_connector[1] < '0')
7019 phys_connector[1] = '0';
7021 output_len += scnprintf(buf + output_len,
7022 PAGE_SIZE - output_len,
7023 "PORT: %.2s ", phys_connector);
7025 box = device->box[i];
7026 if (box != 0 && box != 0xFF)
7027 output_len += scnprintf(buf + output_len,
7028 PAGE_SIZE - output_len,
7031 if ((device->devtype == TYPE_DISK ||
7032 device->devtype == TYPE_ZBC) &&
7033 pqi_expose_device(device))
7034 output_len += scnprintf(buf + output_len,
7035 PAGE_SIZE - output_len,
7039 output_len += scnprintf(buf + output_len,
7040 PAGE_SIZE - output_len,
7044 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7049 static ssize_t pqi_sas_address_show(struct device *dev,
7050 struct device_attribute *attr, char *buffer)
7052 struct pqi_ctrl_info *ctrl_info;
7053 struct scsi_device *sdev;
7054 struct pqi_scsi_dev *device;
7055 unsigned long flags;
7058 sdev = to_scsi_device(dev);
7059 ctrl_info = shost_to_hba(sdev->host);
7061 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7063 device = sdev->hostdata;
7065 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7069 sas_address = device->sas_address;
7071 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7073 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7076 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7077 struct device_attribute *attr, char *buffer)
7079 struct pqi_ctrl_info *ctrl_info;
7080 struct scsi_device *sdev;
7081 struct pqi_scsi_dev *device;
7082 unsigned long flags;
7084 sdev = to_scsi_device(dev);
7085 ctrl_info = shost_to_hba(sdev->host);
7087 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7089 device = sdev->hostdata;
7091 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7095 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7099 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7104 static ssize_t pqi_raid_level_show(struct device *dev,
7105 struct device_attribute *attr, char *buffer)
7107 struct pqi_ctrl_info *ctrl_info;
7108 struct scsi_device *sdev;
7109 struct pqi_scsi_dev *device;
7110 unsigned long flags;
7113 sdev = to_scsi_device(dev);
7114 ctrl_info = shost_to_hba(sdev->host);
7116 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7118 device = sdev->hostdata;
7120 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7124 if (pqi_is_logical_device(device))
7125 raid_level = pqi_raid_level_to_string(device->raid_level);
7129 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7131 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7134 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7135 struct device_attribute *attr, char *buffer)
7137 struct pqi_ctrl_info *ctrl_info;
7138 struct scsi_device *sdev;
7139 struct pqi_scsi_dev *device;
7140 unsigned long flags;
7141 int raid_bypass_cnt;
7143 sdev = to_scsi_device(dev);
7144 ctrl_info = shost_to_hba(sdev->host);
7146 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7148 device = sdev->hostdata;
7150 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7154 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
7156 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7158 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
7161 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7162 struct device_attribute *attr, char *buf)
7164 struct pqi_ctrl_info *ctrl_info;
7165 struct scsi_device *sdev;
7166 struct pqi_scsi_dev *device;
7167 unsigned long flags;
7170 sdev = to_scsi_device(dev);
7171 ctrl_info = shost_to_hba(sdev->host);
7173 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7175 device = sdev->hostdata;
7177 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7181 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7182 device->ncq_prio_enable);
7183 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7188 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7189 struct device_attribute *attr,
7190 const char *buf, size_t count)
7192 struct pqi_ctrl_info *ctrl_info;
7193 struct scsi_device *sdev;
7194 struct pqi_scsi_dev *device;
7195 unsigned long flags;
7196 u8 ncq_prio_enable = 0;
7198 if (kstrtou8(buf, 0, &ncq_prio_enable))
7201 sdev = to_scsi_device(dev);
7202 ctrl_info = shost_to_hba(sdev->host);
7204 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7206 device = sdev->hostdata;
7209 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7213 if (!device->ncq_prio_support ||
7214 !device->is_physical_device) {
7215 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7219 device->ncq_prio_enable = ncq_prio_enable;
7221 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7226 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7227 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7228 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7229 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7230 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7231 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7232 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7233 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7234 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7236 static struct attribute *pqi_sdev_attrs[] = {
7237 &dev_attr_lunid.attr,
7238 &dev_attr_unique_id.attr,
7239 &dev_attr_path_info.attr,
7240 &dev_attr_sas_address.attr,
7241 &dev_attr_ssd_smart_path_enabled.attr,
7242 &dev_attr_raid_level.attr,
7243 &dev_attr_raid_bypass_cnt.attr,
7244 &dev_attr_sas_ncq_prio_enable.attr,
7248 ATTRIBUTE_GROUPS(pqi_sdev);
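/*
 * The per-LUN attributes appear under each device's sysfs directory,
 * e.g. /sys/class/scsi_device/<h:c:t:l>/device/raid_level. Each show
 * routine above takes scsi_device_list_lock so the pqi_scsi_dev cannot
 * be torn down while its fields are being read.
 */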
7250 static struct scsi_host_template pqi_driver_template = {
7251 .module = THIS_MODULE,
7252 .name = DRIVER_NAME_SHORT,
7253 .proc_name = DRIVER_NAME_SHORT,
7254 .queuecommand = pqi_scsi_queue_command,
7255 .scan_start = pqi_scan_start,
7256 .scan_finished = pqi_scan_finished,
7258 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7260 .slave_alloc = pqi_slave_alloc,
7261 .slave_configure = pqi_slave_configure,
7262 .map_queues = pqi_map_queues,
7263 .sdev_groups = pqi_sdev_groups,
7264 .shost_groups = pqi_shost_groups,
7267 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7270 struct Scsi_Host *shost;
7272 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7274 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7279 shost->n_io_port = 0;
7280 shost->this_id = -1;
7281 shost->max_channel = PQI_MAX_BUS;
7282 shost->max_cmd_len = MAX_COMMAND_SIZE;
7283 shost->max_lun = ~0;
7285 shost->max_sectors = ctrl_info->max_sectors;
7286 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7287 shost->cmd_per_lun = shost->can_queue;
7288 shost->sg_tablesize = ctrl_info->sg_tablesize;
7289 shost->transportt = pqi_sas_transport_template;
7290 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7291 shost->unique_id = shost->irq;
7292 shost->nr_hw_queues = ctrl_info->num_queue_groups;
7293 shost->host_tagset = 1;
7294 shost->hostdata[0] = (unsigned long)ctrl_info;
7296 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7298 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7302 rc = pqi_add_sas_host(shost, ctrl_info);
7304 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7308 ctrl_info->scsi_host = shost;
7313 scsi_remove_host(shost);
7315 scsi_host_put(shost);
7320 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7322 struct Scsi_Host *shost;
7324 pqi_delete_sas_host(ctrl_info);
7326 shost = ctrl_info->scsi_host;
7330 scsi_remove_host(shost);
7331 scsi_host_put(shost);
7334 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7337 struct pqi_device_registers __iomem *pqi_registers;
7338 unsigned long timeout;
7339 unsigned int timeout_msecs;
7340 union pqi_reset_register reset_reg;
7342 pqi_registers = ctrl_info->pqi_registers;
7343 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7344 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7346 while (1) {
7347 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7348 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7349 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7350 break;
7351 pqi_check_ctrl_health(ctrl_info);
7352 if (pqi_ctrl_offline(ctrl_info)) {
7353 rc = -ENXIO;
7354 break;
7355 }
7356 if (time_after(jiffies, timeout)) {
7357 rc = -ETIMEDOUT;
7358 break;
7359 }
7365 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7368 union pqi_reset_register reset_reg;
7370 if (ctrl_info->pqi_reset_quiesce_supported) {
7371 rc = sis_pqi_reset_quiesce(ctrl_info);
7372 if (rc) {
7373 dev_err(&ctrl_info->pci_dev->dev,
7374 "PQI reset failed during quiesce with error %d\n", rc);
7379 reset_reg.all_bits = 0;
7380 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7381 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7383 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7385 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7386 if (rc)
7387 dev_err(&ctrl_info->pci_dev->dev,
7388 "PQI reset failed with error %d\n", rc);
7393 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7396 struct bmic_sense_subsystem_info *sense_info;
7398 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7402 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7406 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7407 sizeof(sense_info->ctrl_serial_number));
7408 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7416 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7419 struct bmic_identify_controller *identify;
7421 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7425 rc = pqi_identify_controller(ctrl_info, identify);
7429 if (get_unaligned_le32(&identify->extra_controller_flags) &
7430 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7431 memcpy(ctrl_info->firmware_version,
7432 identify->firmware_version_long,
7433 sizeof(identify->firmware_version_long));
7434 } else {
7435 memcpy(ctrl_info->firmware_version,
7436 identify->firmware_version_short,
7437 sizeof(identify->firmware_version_short));
7438 ctrl_info->firmware_version
7439 [sizeof(identify->firmware_version_short)] = '\0';
7440 snprintf(ctrl_info->firmware_version +
7441 strlen(ctrl_info->firmware_version),
7442 sizeof(ctrl_info->firmware_version) -
7443 sizeof(identify->firmware_version_short),
7444 "-%u",
7445 get_unaligned_le16(&identify->firmware_build_number));
7448 memcpy(ctrl_info->model, identify->product_id,
7449 sizeof(identify->product_id));
7450 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7452 memcpy(ctrl_info->vendor, identify->vendor_id,
7453 sizeof(identify->vendor_id));
7454 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7462 struct pqi_config_table_section_info {
7463 struct pqi_ctrl_info *ctrl_info;
7466 void __iomem *section_iomem_addr;
7469 static inline bool pqi_is_firmware_feature_supported(
7470 struct pqi_config_table_firmware_features *firmware_features,
7471 unsigned int bit_position)
7473 unsigned int byte_index;
7475 byte_index = bit_position / BITS_PER_BYTE;
7477 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7480 return firmware_features->features_supported[byte_index] &
7481 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7484 static inline bool pqi_is_firmware_feature_enabled(
7485 struct pqi_config_table_firmware_features *firmware_features,
7486 void __iomem *firmware_features_iomem_addr,
7487 unsigned int bit_position)
7489 unsigned int byte_index;
7490 u8 __iomem *features_enabled_iomem_addr;
7492 byte_index = (bit_position / BITS_PER_BYTE) +
7493 (le16_to_cpu(firmware_features->num_elements) * 2);
7495 features_enabled_iomem_addr = firmware_features_iomem_addr +
7496 offsetof(struct pqi_config_table_firmware_features,
7497 features_supported) + byte_index;
7499 return *((__force u8 *)features_enabled_iomem_addr) &
7500 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7503 static inline void pqi_request_firmware_feature(
7504 struct pqi_config_table_firmware_features *firmware_features,
7505 unsigned int bit_position)
7507 unsigned int byte_index;
7509 byte_index = (bit_position / BITS_PER_BYTE) +
7510 le16_to_cpu(firmware_features->num_elements);
7512 firmware_features->features_supported[byte_index] |=
7513 (1 << (bit_position % BITS_PER_BYTE));
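/*
 * The firmware-features section is laid out as three consecutive
 * num_elements-byte arrays: features supported (written by firmware),
 * features requested (written by the host) and features enabled
 * (written by firmware after the config-table update). Worked example
 * for feature bit 9 with num_elements = 2: "supported" is byte 1,
 * mask 0x02; the matching "requested" byte is at index 1 + 2 = 3; and
 * the "enabled" byte is at index 1 + (2 * 2) = 5, matching the offsets
 * computed by the helpers above.
 */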
7516 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7517 u16 first_section, u16 last_section)
7519 struct pqi_vendor_general_request request;
7521 memset(&request, 0, sizeof(request));
7523 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7524 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7525 &request.header.iu_length);
7526 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7527 &request.function_code);
7528 put_unaligned_le16(first_section,
7529 &request.data.config_table_update.first_section);
7530 put_unaligned_le16(last_section,
7531 &request.data.config_table_update.last_section);
7533 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7536 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7537 struct pqi_config_table_firmware_features *firmware_features,
7538 void __iomem *firmware_features_iomem_addr)
7540 void *features_requested;
7541 void __iomem *features_requested_iomem_addr;
7542 void __iomem *host_max_known_feature_iomem_addr;
7544 features_requested = firmware_features->features_supported +
7545 le16_to_cpu(firmware_features->num_elements);
7547 features_requested_iomem_addr = firmware_features_iomem_addr +
7548 (features_requested - (void *)firmware_features);
7550 memcpy_toio(features_requested_iomem_addr, features_requested,
7551 le16_to_cpu(firmware_features->num_elements));
7553 if (pqi_is_firmware_feature_supported(firmware_features,
7554 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7555 host_max_known_feature_iomem_addr =
7556 features_requested_iomem_addr +
7557 (le16_to_cpu(firmware_features->num_elements) * 2) +
7559 writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7560 host_max_known_feature_iomem_addr);
7563 return pqi_config_table_update(ctrl_info,
7564 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7565 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7568 struct pqi_firmware_feature {
7570 unsigned int feature_bit;
7573 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7574 struct pqi_firmware_feature *firmware_feature);
7577 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7578 struct pqi_firmware_feature *firmware_feature)
7580 if (!firmware_feature->supported) {
7581 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7582 firmware_feature->feature_name);
7586 if (firmware_feature->enabled) {
7587 dev_info(&ctrl_info->pci_dev->dev,
7588 "%s enabled\n", firmware_feature->feature_name);
7592 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7593 firmware_feature->feature_name);
7596 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7597 struct pqi_firmware_feature *firmware_feature)
7599 switch (firmware_feature->feature_bit) {
7600 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7601 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7603 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7604 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7606 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7607 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7609 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7610 ctrl_info->soft_reset_handshake_supported =
7611 firmware_feature->enabled &&
7612 pqi_read_soft_reset_status(ctrl_info);
7614 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7615 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7617 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7618 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7620 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7621 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7622 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7624 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7625 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7629 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7632 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7633 struct pqi_firmware_feature *firmware_feature)
7635 if (firmware_feature->feature_status)
7636 firmware_feature->feature_status(ctrl_info, firmware_feature);
7639 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7641 static struct pqi_firmware_feature pqi_firmware_features[] = {
7643 .feature_name = "Online Firmware Activation",
7644 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7645 .feature_status = pqi_firmware_feature_status,
7648 .feature_name = "Serial Management Protocol",
7649 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7650 .feature_status = pqi_firmware_feature_status,
7653 .feature_name = "Maximum Known Feature",
7654 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7655 .feature_status = pqi_firmware_feature_status,
7658 .feature_name = "RAID 0 Read Bypass",
7659 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7660 .feature_status = pqi_firmware_feature_status,
7663 .feature_name = "RAID 1 Read Bypass",
7664 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7665 .feature_status = pqi_firmware_feature_status,
7668 .feature_name = "RAID 5 Read Bypass",
7669 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7670 .feature_status = pqi_firmware_feature_status,
7673 .feature_name = "RAID 6 Read Bypass",
7674 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7675 .feature_status = pqi_firmware_feature_status,
7678 .feature_name = "RAID 0 Write Bypass",
7679 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7680 .feature_status = pqi_firmware_feature_status,
7683 .feature_name = "RAID 1 Write Bypass",
7684 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7685 .feature_status = pqi_ctrl_update_feature_flags,
7688 .feature_name = "RAID 5 Write Bypass",
7689 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7690 .feature_status = pqi_ctrl_update_feature_flags,
7693 .feature_name = "RAID 6 Write Bypass",
7694 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7695 .feature_status = pqi_ctrl_update_feature_flags,
7698 .feature_name = "New Soft Reset Handshake",
7699 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7700 .feature_status = pqi_ctrl_update_feature_flags,
7703 .feature_name = "RAID IU Timeout",
7704 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7705 .feature_status = pqi_ctrl_update_feature_flags,
7708 .feature_name = "TMF IU Timeout",
7709 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7710 .feature_status = pqi_ctrl_update_feature_flags,
7713 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7714 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7715 .feature_status = pqi_firmware_feature_status,
7718 .feature_name = "Firmware Triage",
7719 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7720 .feature_status = pqi_ctrl_update_feature_flags,
7723 .feature_name = "RPL Extended Formats 4 and 5",
7724 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7725 .feature_status = pqi_ctrl_update_feature_flags,
7729 static void pqi_process_firmware_features(
7730 struct pqi_config_table_section_info *section_info)
7733 struct pqi_ctrl_info *ctrl_info;
7734 struct pqi_config_table_firmware_features *firmware_features;
7735 void __iomem *firmware_features_iomem_addr;
7737 unsigned int num_features_supported;
7739 ctrl_info = section_info->ctrl_info;
7740 firmware_features = section_info->section;
7741 firmware_features_iomem_addr = section_info->section_iomem_addr;
7743 for (i = 0, num_features_supported = 0;
7744 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7745 if (pqi_is_firmware_feature_supported(firmware_features,
7746 pqi_firmware_features[i].feature_bit)) {
7747 pqi_firmware_features[i].supported = true;
7748 num_features_supported++;
7749 } else {
7750 pqi_firmware_feature_update(ctrl_info,
7751 &pqi_firmware_features[i]);
7755 if (num_features_supported == 0)
7758 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7759 if (!pqi_firmware_features[i].supported)
7760 continue;
7761 pqi_request_firmware_feature(firmware_features,
7762 pqi_firmware_features[i].feature_bit);
7765 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7766 firmware_features_iomem_addr);
7768 dev_err(&ctrl_info->pci_dev->dev,
7769 "failed to enable firmware features in PQI configuration table\n");
7770 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7771 if (!pqi_firmware_features[i].supported)
7772 continue;
7773 pqi_firmware_feature_update(ctrl_info,
7774 &pqi_firmware_features[i]);
7779 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7780 if (!pqi_firmware_features[i].supported)
7781 continue;
7782 if (pqi_is_firmware_feature_enabled(firmware_features,
7783 firmware_features_iomem_addr,
7784 pqi_firmware_features[i].feature_bit)) {
7785 pqi_firmware_features[i].enabled = true;
7787 pqi_firmware_feature_update(ctrl_info,
7788 &pqi_firmware_features[i]);
7792 static void pqi_init_firmware_features(void)
7796 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7797 pqi_firmware_features[i].supported = false;
7798 pqi_firmware_features[i].enabled = false;
7802 static void pqi_process_firmware_features_section(
7803 struct pqi_config_table_section_info *section_info)
7805 mutex_lock(&pqi_firmware_features_mutex);
7806 pqi_init_firmware_features();
7807 pqi_process_firmware_features(section_info);
7808 mutex_unlock(&pqi_firmware_features_mutex);
7811 /*
7812 * Reset all controller settings that can be initialized during the processing
7813 * of the PQI Configuration Table.
7814 */
7816 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7818 ctrl_info->heartbeat_counter = NULL;
7819 ctrl_info->soft_reset_status = NULL;
7820 ctrl_info->soft_reset_handshake_supported = false;
7821 ctrl_info->enable_r1_writes = false;
7822 ctrl_info->enable_r5_writes = false;
7823 ctrl_info->enable_r6_writes = false;
7824 ctrl_info->raid_iu_timeout_supported = false;
7825 ctrl_info->tmf_iu_timeout_supported = false;
7826 ctrl_info->firmware_triage_supported = false;
7827 ctrl_info->rpl_extended_format_4_5_supported = false;
7830 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7834 bool firmware_feature_section_present;
7835 void __iomem *table_iomem_addr;
7836 struct pqi_config_table *config_table;
7837 struct pqi_config_table_section_header *section;
7838 struct pqi_config_table_section_info section_info;
7839 struct pqi_config_table_section_info feature_section_info;
7841 table_length = ctrl_info->config_table_length;
7842 if (table_length == 0)
7845 config_table = kmalloc(table_length, GFP_KERNEL);
7846 if (!config_table) {
7847 dev_err(&ctrl_info->pci_dev->dev,
7848 "failed to allocate memory for PQI configuration table\n");
7852 /*
7853 * Copy the config table contents from I/O memory space into the
7854 * driver's memory.
7855 */
7856 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7857 memcpy_fromio(config_table, table_iomem_addr, table_length);
7859 firmware_feature_section_present = false;
7860 section_info.ctrl_info = ctrl_info;
7861 section_offset = get_unaligned_le32(&config_table->first_section_offset);
7863 while (section_offset) {
7864 section = (void *)config_table + section_offset;
7866 section_info.section = section;
7867 section_info.section_offset = section_offset;
7868 section_info.section_iomem_addr = table_iomem_addr + section_offset;
7870 switch (get_unaligned_le16(&section->section_id)) {
7871 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7872 firmware_feature_section_present = true;
7873 feature_section_info = section_info;
7875 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7876 if (pqi_disable_heartbeat)
7877 dev_warn(&ctrl_info->pci_dev->dev,
7878 "heartbeat disabled by module parameter\n");
7879 else
7880 ctrl_info->heartbeat_counter =
7881 table_iomem_addr +
7882 section_offset +
7883 offsetof(struct pqi_config_table_heartbeat,
7884 heartbeat_counter);
7885 break;
7886 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7887 ctrl_info->soft_reset_status =
7888 table_iomem_addr +
7889 section_offset +
7890 offsetof(struct pqi_config_table_soft_reset,
7891 soft_reset_status);
7892 break;
7893 }
7895 section_offset = get_unaligned_le16(&section->next_section_offset);
7898 /*
7899 * We process the firmware feature section after all other sections
7900 * have been processed so that the feature bit callbacks can take
7901 * into account the settings configured by other sections.
7902 */
7903 if (firmware_feature_section_present)
7904 pqi_process_firmware_features_section(&feature_section_info);
7906 kfree(config_table);
7911 /* Switches the controller from PQI mode back into SIS mode. */
7913 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7917 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7918 rc = pqi_reset(ctrl_info);
7921 rc = sis_reenable_sis_mode(ctrl_info);
7923 dev_err(&ctrl_info->pci_dev->dev,
7924 "re-enabling SIS mode failed with error %d\n", rc);
7927 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7932 /*
7933 * If the controller isn't already in SIS mode, this function forces it into
7934 * SIS mode.
7935 */
7937 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7939 if (!sis_is_firmware_running(ctrl_info))
7942 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7945 if (sis_is_kernel_up(ctrl_info)) {
7946 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7950 return pqi_revert_to_sis_mode(ctrl_info);
7953 static void pqi_perform_lockup_action(void)
7955 switch (pqi_lockup_action) {
7956 case PANIC:
7957 panic("FATAL: Smart Family Controller lockup detected");
7958 break;
7959 case REBOOT:
7960 emergency_restart();
7961 break;
7968 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7973 if (reset_devices) {
7974 if (pqi_is_fw_triage_supported(ctrl_info)) {
7975 rc = sis_wait_for_fw_triage_completion(ctrl_info);
7979 sis_soft_reset(ctrl_info);
7980 ssleep(PQI_POST_RESET_DELAY_SECS);
7982 rc = pqi_force_sis_mode(ctrl_info);
7987 /*
7988 * Wait until the controller is ready to start accepting SIS
7989 * commands.
7990 */
7991 rc = sis_wait_for_ctrl_ready(ctrl_info);
7993 if (reset_devices) {
7994 dev_err(&ctrl_info->pci_dev->dev,
7995 "kdump init failed with error %d\n", rc);
7996 pqi_lockup_action = REBOOT;
7997 pqi_perform_lockup_action();
8002 /*
8003 * Get the controller properties. This allows us to determine
8004 * whether or not it supports PQI mode.
8005 */
8006 rc = sis_get_ctrl_properties(ctrl_info);
8008 dev_err(&ctrl_info->pci_dev->dev,
8009 "error obtaining controller properties\n");
8013 rc = sis_get_pqi_capabilities(ctrl_info);
8015 dev_err(&ctrl_info->pci_dev->dev,
8016 "error obtaining controller capabilities\n");
8020 product_id = sis_get_product_id(ctrl_info);
8021 ctrl_info->product_id = (u8)product_id;
8022 ctrl_info->product_revision = (u8)(product_id >> 8);
8024 if (reset_devices) {
8025 if (ctrl_info->max_outstanding_requests >
8026 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8027 ctrl_info->max_outstanding_requests =
8028 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8030 if (ctrl_info->max_outstanding_requests >
8031 PQI_MAX_OUTSTANDING_REQUESTS)
8032 ctrl_info->max_outstanding_requests =
8033 PQI_MAX_OUTSTANDING_REQUESTS;
8036 pqi_calculate_io_resources(ctrl_info);
8038 rc = pqi_alloc_error_buffer(ctrl_info);
8040 dev_err(&ctrl_info->pci_dev->dev,
8041 "failed to allocate PQI error buffer\n");
8045 /*
8046 * If the function we are about to call succeeds, the
8047 * controller will transition from legacy SIS mode
8048 * into PQI mode.
8049 */
8050 rc = sis_init_base_struct_addr(ctrl_info);
8052 dev_err(&ctrl_info->pci_dev->dev,
8053 "error initializing PQI mode\n");
8057 /* Wait for the controller to complete the SIS -> PQI transition. */
8058 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8060 dev_err(&ctrl_info->pci_dev->dev,
8061 "transition to PQI mode failed\n");
8065 /* From here on, we are running in PQI mode. */
8066 ctrl_info->pqi_mode_enabled = true;
8067 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8069 rc = pqi_alloc_admin_queues(ctrl_info);
8071 dev_err(&ctrl_info->pci_dev->dev,
8072 "failed to allocate admin queues\n");
8076 rc = pqi_create_admin_queues(ctrl_info);
8078 dev_err(&ctrl_info->pci_dev->dev,
8079 "error creating admin queues\n");
8083 rc = pqi_report_device_capability(ctrl_info);
8085 dev_err(&ctrl_info->pci_dev->dev,
8086 "obtaining device capability failed\n");
8090 rc = pqi_validate_device_capability(ctrl_info);
8094 pqi_calculate_queue_resources(ctrl_info);
8096 rc = pqi_enable_msix_interrupts(ctrl_info);
8100 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8101 ctrl_info->max_msix_vectors =
8102 ctrl_info->num_msix_vectors_enabled;
8103 pqi_calculate_queue_resources(ctrl_info);
8106 rc = pqi_alloc_io_resources(ctrl_info);
8110 rc = pqi_alloc_operational_queues(ctrl_info);
8112 dev_err(&ctrl_info->pci_dev->dev,
8113 "failed to allocate operational queues\n");
8117 pqi_init_operational_queues(ctrl_info);
8119 rc = pqi_create_queues(ctrl_info);
8123 rc = pqi_request_irqs(ctrl_info);
8127 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8129 ctrl_info->controller_online = true;
8131 rc = pqi_process_config_table(ctrl_info);
8135 pqi_start_heartbeat_timer(ctrl_info);
8137 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8138 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8139 if (rc) { /* Supported features not returned correctly. */
8140 dev_err(&ctrl_info->pci_dev->dev,
8141 "error obtaining advanced RAID bypass configuration\n");
8144 ctrl_info->ciss_report_log_flags |=
8145 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8148 rc = pqi_enable_events(ctrl_info);
8150 dev_err(&ctrl_info->pci_dev->dev,
8151 "error enabling events\n");
8155 /* Register with the SCSI subsystem. */
8156 rc = pqi_register_scsi(ctrl_info);
8160 rc = pqi_get_ctrl_product_details(ctrl_info);
8162 dev_err(&ctrl_info->pci_dev->dev,
8163 "error obtaining product details\n");
8167 rc = pqi_get_ctrl_serial_number(ctrl_info);
8169 dev_err(&ctrl_info->pci_dev->dev,
8170 "error obtaining ctrl serial number\n");
8174 rc = pqi_set_diag_rescan(ctrl_info);
8176 dev_err(&ctrl_info->pci_dev->dev,
8177 "error enabling multi-lun rescan\n");
8181 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8183 dev_err(&ctrl_info->pci_dev->dev,
8184 "error updating host wellness\n");
8188 pqi_schedule_update_time_worker(ctrl_info);
8190 pqi_scan_scsi_devices(ctrl_info);
8195 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8198 struct pqi_admin_queues *admin_queues;
8199 struct pqi_event_queue *event_queue;
8201 admin_queues = &ctrl_info->admin_queues;
8202 admin_queues->iq_pi_copy = 0;
8203 admin_queues->oq_ci_copy = 0;
8204 writel(0, admin_queues->oq_pi);
8206 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8207 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8208 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8209 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8211 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8212 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8213 writel(0, ctrl_info->queue_groups[i].oq_pi);
8216 event_queue = &ctrl_info->event_queue;
8217 writel(0, event_queue->oq_pi);
8218 event_queue->oq_ci_copy = 0;
8221 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8225 rc = pqi_force_sis_mode(ctrl_info);
8229 /*
8230 * Wait until the controller is ready to start accepting SIS
8231 * commands.
8232 */
8233 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8237 /*
8238 * Get the controller properties. This allows us to determine
8239 * whether or not it supports PQI mode.
8240 */
8241 rc = sis_get_ctrl_properties(ctrl_info);
8243 dev_err(&ctrl_info->pci_dev->dev,
8244 "error obtaining controller properties\n");
8248 rc = sis_get_pqi_capabilities(ctrl_info);
8250 dev_err(&ctrl_info->pci_dev->dev,
8251 "error obtaining controller capabilities\n");
8255 /*
8256 * If the function we are about to call succeeds, the
8257 * controller will transition from legacy SIS mode
8258 * into PQI mode.
8259 */
8260 rc = sis_init_base_struct_addr(ctrl_info);
8262 dev_err(&ctrl_info->pci_dev->dev,
8263 "error initializing PQI mode\n");
8267 /* Wait for the controller to complete the SIS -> PQI transition. */
8268 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8270 dev_err(&ctrl_info->pci_dev->dev,
8271 "transition to PQI mode failed\n");
8275 /* From here on, we are running in PQI mode. */
8276 ctrl_info->pqi_mode_enabled = true;
8277 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8279 pqi_reinit_queues(ctrl_info);
8281 rc = pqi_create_admin_queues(ctrl_info);
8283 dev_err(&ctrl_info->pci_dev->dev,
8284 "error creating admin queues\n");
8288 rc = pqi_create_queues(ctrl_info);
8292 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8294 ctrl_info->controller_online = true;
8295 pqi_ctrl_unblock_requests(ctrl_info);
8297 pqi_ctrl_reset_config(ctrl_info);
8299 rc = pqi_process_config_table(ctrl_info);
8303 pqi_start_heartbeat_timer(ctrl_info);
8305 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8306 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8308 dev_err(&ctrl_info->pci_dev->dev,
8309 "error obtaining advanced RAID bypass configuration\n");
8312 ctrl_info->ciss_report_log_flags |=
8313 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8316 rc = pqi_enable_events(ctrl_info);
8318 dev_err(&ctrl_info->pci_dev->dev,
8319 "error enabling events\n");
8323 rc = pqi_get_ctrl_product_details(ctrl_info);
8325 dev_err(&ctrl_info->pci_dev->dev,
8326 "error obtaining product details\n");
8330 rc = pqi_set_diag_rescan(ctrl_info);
8332 dev_err(&ctrl_info->pci_dev->dev,
8333 "error enabling multi-lun rescan\n");
8337 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8339 dev_err(&ctrl_info->pci_dev->dev,
8340 "error updating host wellness\n");
8344 if (pqi_ofa_in_progress(ctrl_info))
8345 pqi_ctrl_unblock_scan(ctrl_info);
8347 pqi_scan_scsi_devices(ctrl_info);
8352 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8356 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8357 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8359 return pcibios_err_to_errno(rc);
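/*
 * The timeout argument is the 4-bit Completion Timeout Value encoding
 * from the PCIe Device Control 2 register; the 0x6 encoding used below
 * selects the 65 ms - 210 ms range. The clear-and-set helper rewrites
 * only the PCI_EXP_DEVCTL2_COMP_TIMEOUT field in a single
 * read-modify-write.
 */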
8362 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8367 rc = pci_enable_device(ctrl_info->pci_dev);
8369 dev_err(&ctrl_info->pci_dev->dev,
8370 "failed to enable PCI device\n");
8374 if (sizeof(dma_addr_t) > 4)
8375 mask = DMA_BIT_MASK(64);
8377 mask = DMA_BIT_MASK(32);
8379 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8381 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8382 goto disable_device;
8385 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8387 dev_err(&ctrl_info->pci_dev->dev,
8388 "failed to obtain PCI resources\n");
8389 goto disable_device;
8392 ctrl_info->iomem_base = ioremap(pci_resource_start(
8393 ctrl_info->pci_dev, 0),
8394 sizeof(struct pqi_ctrl_registers));
8395 if (!ctrl_info->iomem_base) {
8396 dev_err(&ctrl_info->pci_dev->dev,
8397 "failed to map memory for controller registers\n");
8399 goto release_regions;
8402 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8404 /* Increase the PCIe completion timeout. */
8405 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8406 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8408 dev_err(&ctrl_info->pci_dev->dev,
8409 "failed to set PCIe completion timeout\n");
8410 goto release_regions;
8413 /* Enable bus mastering. */
8414 pci_set_master(ctrl_info->pci_dev);
8416 ctrl_info->registers = ctrl_info->iomem_base;
8417 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8419 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8424 pci_release_regions(ctrl_info->pci_dev);
8426 pci_disable_device(ctrl_info->pci_dev);
8431 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8433 iounmap(ctrl_info->iomem_base);
8434 pci_release_regions(ctrl_info->pci_dev);
8435 if (pci_is_enabled(ctrl_info->pci_dev))
8436 pci_disable_device(ctrl_info->pci_dev);
8437 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8440 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8442 struct pqi_ctrl_info *ctrl_info;
8444 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8445 GFP_KERNEL, numa_node);
8449 mutex_init(&ctrl_info->scan_mutex);
8450 mutex_init(&ctrl_info->lun_reset_mutex);
8451 mutex_init(&ctrl_info->ofa_mutex);
8453 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8454 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8456 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8457 atomic_set(&ctrl_info->num_interrupts, 0);
8459 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8460 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8462 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8463 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8465 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8466 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8468 sema_init(&ctrl_info->sync_request_sem,
8469 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8470 init_waitqueue_head(&ctrl_info->block_requests_wait);
8472 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8473 ctrl_info->irq_mode = IRQ_MODE_NONE;
8474 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8476 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8477 ctrl_info->max_transfer_encrypted_sas_sata =
8478 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8479 ctrl_info->max_transfer_encrypted_nvme =
8480 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8481 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8482 ctrl_info->max_write_raid_1_10_2drive = ~0;
8483 ctrl_info->max_write_raid_1_10_3drive = ~0;
8488 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8493 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8495 pqi_free_irqs(ctrl_info);
8496 pqi_disable_msix_interrupts(ctrl_info);
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}

static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_remove_all_scsi_devices(ctrl_info);
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}

static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
}

/* Undo pqi_ofa_ctrl_quiesce(), releasing the blocks in reverse order. */
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);
}

static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
{
	int i;
	u32 sg_count;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	dma_addr_t dma_handle;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	sg_count = DIV_ROUND_UP(total_size, chunk_size);
	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
		goto out;

	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
		goto out;

	dev = &ctrl_info->pci_dev->dev;

	for (i = 0; i < sg_count; i++) {
		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			goto out_free_chunks;
		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	return -ENOMEM;
}

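/*
 * Illustrative sketch (not part of the driver): why the loop above fills the
 * descriptors with put_unaligned_le64()/put_unaligned_le32() rather than
 * plain assignment. The descriptor array is consumed by the controller,
 * which expects little-endian fields at possibly unaligned offsets; the
 * helpers are correct on any host endianness and alignment. The function
 * name below is hypothetical; the last descriptor in a chain is additionally
 * flagged with CISS_SG_LAST, as done above.
 */
static __maybe_unused void example_fill_sg_descriptor(
	struct pqi_sg_descriptor *sg, dma_addr_t addr, u32 len)
{
	put_unaligned_le64((u64)addr, &sg->address);
	put_unaligned_le32(len, &sg->length);
	put_unaligned_le32(0, &sg->flags);
}
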
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 chunk_size;
	u32 min_chunk_size;

	if (ctrl_info->ofa_bytes_requested == 0)
		return 0;

	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
	min_chunk_size = PAGE_ALIGN(min_chunk_size);

	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
			return 0;
		chunk_size /= 2;
		chunk_size = PAGE_ALIGN(chunk_size);
	}

	return -ENOMEM;
}

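/*
 * Illustrative sketch (not part of the driver): the descent performed by
 * pqi_ofa_alloc_host_buffer() above. Each failed attempt halves the chunk
 * size and rounds it back up to a page multiple, trading fewer/larger DMA
 * chunks for more/smaller ones, until an attempt succeeds or chunks drop
 * below the size needed to fit the request in PQI_OFA_MAX_SG_DESCRIPTORS
 * descriptors. For a 1 MiB request this tries 1 MiB, then 512 KiB, then
 * 256 KiB, and so on. The function name below is hypothetical.
 */
static __maybe_unused u32 example_next_ofa_chunk_size(u32 chunk_size)
{
	/* Halve, then re-align to a page boundary, as the retry loop does. */
	return (u32)PAGE_ALIGN(chunk_size / 2);
}
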
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	struct device *dev;
	struct pqi_ofa_memory *ofap;

	dev = &ctrl_info->pci_dev->dev;

	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
	if (!ofap)
		return;

	ctrl_info->pqi_ofa_mem_virt_addr = ofap;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev,
			"failed to allocate host buffer for Online Firmware Activation\n");
		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
		return;
	}

	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}

static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	unsigned int num_memory_descriptors;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
	if (!ofap)
		return;

	dev = &ctrl_info->pci_dev->dev;

	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
		goto out;

	mem_descriptor = ofap->sg_descriptor;
	num_memory_descriptors =
		get_unaligned_le16(&ofap->num_memory_descriptors);

	for (i = 0; i < num_memory_descriptors; i++) {
		dma_free_coherent(dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(dev, sizeof(*ofap), ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}

static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	u32 buffer_length;
	struct pqi_vendor_general_request request;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (ofap) {
		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(buffer_length,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}

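/*
 * Illustrative sketch (not part of the driver): the IU framing used by
 * pqi_ofa_host_memory_update() above. Every PQI request begins with a
 * pqi_iu_header whose iu_length counts only the bytes *after* the header,
 * which is why sizeof(request) - PQI_REQUEST_HEADER_LENGTH is stored. The
 * helper name below is hypothetical.
 */
static __maybe_unused void example_init_vendor_general_header(
	struct pqi_vendor_general_request *request, u16 function_code)
{
	memset(request, 0, sizeof(*request));
	request->header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	/* Length of everything following the fixed IU header. */
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(function_code, &request->function_code);
}
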
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{
	ssleep(delay_secs);

	return pqi_ctrl_init_resume(ctrl_info);
}

static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};

static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			sdev = scmd->device;
			if (!sdev || !scsi_device_online(sdev)) {
				pqi_free_io_request(io_request);
				continue;
			}
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}

static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}

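/*
 * Illustrative sketch (not part of the driver): the embedded-work pattern
 * used by pqi_ctrl_offline_worker() above. The work_struct is a member of
 * the larger object, so the handler recovers the owning object with
 * container_of() instead of a lookup table. The type and names below are
 * hypothetical.
 */
struct example_object {
	int id;
	struct work_struct work;
};

static __maybe_unused void example_worker(struct work_struct *work)
{
	struct example_object *obj;

	/* Map the work_struct pointer back to its enclosing object. */
	obj = container_of(work, struct example_object, work);
	pr_info("deferred work for object %d\n", obj->id);
}
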
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}

static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microchip Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}

static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		node = cpu_to_node(0);
		if (node == NUMA_NO_NODE)
			node = 0;
		set_dev_node(&pci_dev->dev, node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}

static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}

static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		/* Exactly one of these fires for any slot still in use. */
		WARN_ON(scmd != NULL);	/* IO command from SML */
		WARN_ON(scmd == NULL);	/* Non-IO cmd or driver initiated */
	}
}

static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (system_state == SYSTEM_RESTART)
		shutdown_event = RESTART;
	else
		shutdown_event = SHUTDOWN;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, shutdown_event);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}

static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}

static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}

static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
{
	if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
		return RESTART;

	return SUSPEND;
}

static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (suspend) {
		enum bmic_flush_cache_shutdown_event shutdown_event;

		shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
		pqi_flush_cache(ctrl_info, shutdown_event);
	}

	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_crash_if_pending_command(ctrl_info);
	pqi_free_irqs(ctrl_info);

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

static __maybe_unused int pqi_suspend(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, true);
}

static int pqi_resume_or_restore(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	ssleep(PQI_POST_RESET_DELAY_SECS);

	return pqi_ctrl_init_resume(ctrl_info);
}

static int pqi_freeze(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, false);
}

static int pqi_thaw(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	ctrl_info->controller_online = true;
	ctrl_info->pqi_mode_enabled = true;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	return 0;
}

static int pqi_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
	pqi_flush_cache(ctrl_info, shutdown_event);

	return 0;
}

static const struct dev_pm_ops pqi_pm_ops = {
	.suspend = pqi_suspend,
	.resume = pqi_resume_or_restore,
	.freeze = pqi_freeze,
	.thaw = pqi_thaw,
	.poweroff = pqi_poweroff,
	.restore = pqi_resume_or_restore,
};

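/*
 * Illustrative sketch (not part of the driver): the PM core picks among the
 * callbacks above by transition type - suspend/resume for suspend-to-RAM,
 * freeze/thaw around the hibernation image snapshot (the system keeps
 * running to write the image, so freeze must skip the cache flush), and
 * poweroff/restore for the final power-off and the restore in the boot
 * kernel. A driver whose freeze path can be identical to suspend can wire
 * one handler pair with SET_SYSTEM_SLEEP_PM_OPS(); the names below are
 * hypothetical.
 */
static int __maybe_unused example_pm_suspend(struct device *dev) { return 0; }
static int __maybe_unused example_pm_resume(struct device *dev) { return 0; }

static __maybe_unused const struct dev_pm_ops example_pm_ops = {
	/* Expands to suspend/resume, freeze/thaw, and poweroff/restore. */
	SET_SYSTEM_SLEEP_PM_OPS(example_pm_suspend, example_pm_resume)
};
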
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0808) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0809) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x080a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1304) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1400) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1402) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1410) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1411) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1412) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1420) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1430) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1440) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1441) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1450) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1452) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1462) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1463) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1470) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1471) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1472) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1473) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1474) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1480) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1490) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1491) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a2) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a4) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a5) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a6) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14b0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14b1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c2) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14d0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14e0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14f0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1002) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) },
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_GIGABYTE, 0x1000) },
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

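/*
 * Illustrative sketch (not part of the driver): how an entry in the table
 * above matches a device. PCI_DEVICE_SUB(vend, dev, subvend, subdev) fixes
 * all four IDs, and PCI_ANY_ID in any position wildcards that field, so the
 * final catch-all row matches any 0x028f controller not caught by a more
 * specific row earlier in the table. This simplified predicate mirrors the
 * PCI core's matching (which additionally checks class/class_mask); the
 * function name is hypothetical.
 */
static __maybe_unused bool example_id_matches(const struct pci_device_id *id,
	const struct pci_dev *pci_dev)
{
	return (id->vendor == PCI_ANY_ID || id->vendor == pci_dev->vendor) &&
		(id->device == PCI_ANY_ID || id->device == pci_dev->device) &&
		(id->subvendor == PCI_ANY_ID ||
			id->subvendor == pci_dev->subsystem_vendor) &&
		(id->subdevice == PCI_ANY_ID ||
			id->subdevice == pci_dev->subsystem_device);
}
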
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.driver = {
		.pm = &pqi_pm_ops
	},
#endif
};

static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_verify_structures();
	sis_verify_structures();

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);

static void pqi_verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_shutdown_reason_code) != 0xcc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		driver_flags) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
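
/*
 * Illustrative sketch (not part of the driver): what the checks above buy.
 * BUILD_BUG_ON() fails the *compile* when its condition is true, so any
 * compiler padding or struct reordering that shifts a wire-visible field
 * breaks the build instead of silently corrupting host/controller
 * communication. The struct and function below are hypothetical.
 */
struct example_wire_format {
	u8	opcode;		/* byte 0 */
	u8	flags;		/* byte 1 */
	__le16	length;		/* bytes 2-3, little-endian on the wire */
};

static __maybe_unused void example_verify_wire_format(void)
{
	BUILD_BUG_ON(offsetof(struct example_wire_format, opcode) != 0);
	BUILD_BUG_ON(offsetof(struct example_wire_format, flags) != 1);
	BUILD_BUG_ON(offsetof(struct example_wire_format, length) != 2);
	BUILD_BUG_ON(sizeof(struct example_wire_format) != 4);
}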