// SPDX-License-Identifier: GPL-2.0
/*
 *	driver for Microchip PQI-based storage controllers
 *	Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
 *	Copyright (c) 2016-2018 Microsemi Corporation
 *	Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *	Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi.h"
#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
#define DRIVER_VERSION		"2.1.12-055"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		12
#define DRIVER_REVISION		55

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10
MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_msecs);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);
enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};
static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}
#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1	/* also used for RAID 40 */
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}
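/*
 * Devices are identified by 8-byte CISS "scsi3addr" LUN addresses throughout
 * this driver. pqi_is_external_raid_addr() encodes the convention that byte 2
 * of a logical volume's address is non-zero only for volumes living behind an
 * external RAID controller.
 */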
static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2
static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}
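/*
 * The SIS "driver scratch" register is controller-resident state that the
 * driver treats as a small persistent bitfield: the helpers above
 * read-modify-write it so that the current mode (PQI vs. SIS) and firmware
 * triage support can be recovered on the next probe, before any PQI queues
 * exist.
 */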
static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}
static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}
static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}
#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}
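/*
 * Quiescing counts threads rather than commands: submission paths bracket
 * themselves with pqi_ctrl_busy()/pqi_ctrl_unbusy(), and a thread parked in
 * pqi_wait_if_ctrl_blocked() is counted as blocked. The controller is
 * considered quiesced once no thread is busy without also being blocked,
 * i.e. num_busy_threads <= num_blocked_threads.
 */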
static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}
static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}
static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
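/*
 * Internal RAID-path requests in this file use exactly one SG descriptor:
 * the buffer is mapped contiguously by pqi_map_single() and tagged
 * CISS_SG_LAST, which is why callers below pass num_descriptors == 1 to
 * pqi_pci_unmap().
 */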
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case TEST_UNIT_READY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = TEST_UNIT_READY;
		break;
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}
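/*
 * Slot allocation above is lock-free: atomic_inc_return() == 1 means this
 * thread won the race for a free slot (the refcount went 0 -> 1); any other
 * result means the slot is in use, so the increment is undone and the scan
 * moves on. next_io_request_slot is only a search hint, which is why its
 * unsynchronized read and write are "benignly racy".
 */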
static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}
static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
	sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
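/*
 * The BMIC "sense feature" AIO subpage reports its transfer and write limits
 * in 1 KiB units; pqi_aio_limit_to_bytes() above converts them to bytes and
 * treats a reported 0 as "no limit" (~0).
 */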
static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}
#define PQI_FETCH_PTRAID_DATA	(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}
#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
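/*
 * The host-wellness payloads above and below form a tagged byte stream that
 * the firmware parses: "<HW>" opens the buffer, a two-byte field tag ("DV"
 * for driver version, "TD" for time/date) is followed by a little-endian
 * length and the field data, "DW" marks a don't-write tag (per the field
 * names), and "ZZ" terminates the buffer.
 */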
#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
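/*
 * The REPORT LUNS exchange above is inherently racy: a header-only read
 * learns the list length, the full read is retried with that size, and if
 * the list grew in between (new_lun_list_length > lun_list_length) the
 * buffer is reallocated and the read repeated until a stable snapshot fits.
 */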
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals = 0;
	size_t rpl_16byte_wwid_list_length;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
	rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));

	rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
	if (!rpl_16byte_wwid_list)
		return -ENOMEM;

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid, 0, 8);
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}
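/*
 * Controllers that cannot return the extended-format-4 (16-byte WWID)
 * physical LUN list answer in format 2 with 8-byte WWIDs; the loop above
 * normalizes that reply by copying each entry into a 16-byte-WWID list,
 * zeroing the first 8 WWID bytes and placing the 8-byte WWID in the upper
 * half, so the rest of the driver only ever sees
 * report_phys_lun_16byte_wwid_list.
 */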
static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}
static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	int bus;
	int target;
	int lun;
	u32 lunid;
	u8 *scsi3addr;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {
		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}
static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}
#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
		sizeof(device->page_83_identifier));

	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
		id_phys->phy_count)
		device->phy_id =
			id_phys->phy_to_phy_map[device->active_path_index];
	else
		device->phy_id = 0xFF;

	return 0;
}
static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}
/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 * using the management utility.
 */
static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 scsi_status;
	enum dma_data_direction dir;
	char *buffer;
	int buffer_length = 64;
	size_t sense_data_length;
	struct scsi_sense_hdr sshdr;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info error_info;
	bool offline = false; /* Assume keep online */

	/* Do not check controllers. */
	if (pqi_is_hba_lunid(device->scsi3addr))
		return false;

	/* Do not check LVs. */
	if (pqi_is_logical_device(device))
		return false;

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return false; /* Assume not offline */

	/* Check for SANITIZE in progress using TUR */
	rc = pqi_build_raid_path_request(ctrl_info, &request,
		TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		goto out; /* Assume not offline */

	memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);

	if (rc)
		goto out; /* Assume not offline */

	scsi_status = error_info.status;
	sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info.response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info.data))
			sense_data_length = sizeof(error_info.data);

		/*
		 * Check for sanitize in progress: asc:0x04, ascq: 0x1b
		 */
		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info.data,
				sense_data_length, &sshdr) &&
			sshdr.sense_key == NOT_READY &&
			sshdr.asc == 0x04 &&
			sshdr.ascq == 0x1b) {
			device->device_offline = true;
			offline = true;
			goto out; /* Keep device offline */
		}
	}

out:
	kfree(buffer);
	return offline;
}
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}
static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}
static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}
#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);

	pqi_device_remove_start(device);
}
/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;

	return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}
static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}
#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx%016llx",
			get_unaligned_be64(&device->wwid[0]),
			get_unaligned_be64(&device->wwid[8]));

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
		existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
		new_device->volume_status == CISS_LV_OK)
		existing_device->rescan = true;

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->phy_id = new_device->phy_id;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->next_bypass_group = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}
static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}
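/*
 * pqi_update_device_list() below reconciles a freshly scanned device list
 * against the driver's internal one in phases: under the device list
 * spinlock it only marks and moves entries (devices that vanished go onto a
 * local delete_list, genuinely new ones onto an add_list); then, with the
 * lock dropped, it removes departed devices from the SCSI midlayer and
 * finally exposes the new ones.
 */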
2104 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2105 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2109 unsigned long flags;
2110 enum pqi_find_result find_result;
2111 struct pqi_scsi_dev *device;
2112 struct pqi_scsi_dev *next;
2113 struct pqi_scsi_dev *matching_device;
2114 LIST_HEAD(add_list);
2115 LIST_HEAD(delete_list);
2118 * The idea here is to do as little work as possible while holding the
2119 * spinlock. That's why we go to great pains to defer anything other
2120 * than updating the internal device list until after we release the
2124 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2126 /* Assume that all devices in the existing list have gone away. */
2127 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2128 device->device_gone = true;
2130 for (i = 0; i < num_new_devices; i++) {
2131 device = new_device_list[i];
2133 find_result = pqi_scsi_find_entry(ctrl_info, device,
2134 &matching_device);
2136 switch (find_result) {
2137 case DEVICE_SAME:
2138 /*
2139 * The newly found device is already in the existing
2140 * device list.
2141 */
2142 device->new_device = false;
2143 matching_device->device_gone = false;
2144 pqi_scsi_update_device(matching_device, device);
2145 break;
2146 case DEVICE_NOT_FOUND:
2147 /*
2148 * The newly found device is NOT in the existing device
2149 * list.
2150 */
2151 device->new_device = true;
2152 break;
2153 case DEVICE_CHANGED:
2154 /*
2155 * The original device has gone away and we need to add
2156 * the new device.
2157 */
2158 device->new_device = true;
2159 break;
2160 }
2161 }
2163 /* Process all devices that have gone away. */
2164 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2165 scsi_device_list_entry) {
2166 if (device->device_gone) {
2167 list_del(&device->scsi_device_list_entry);
2168 list_add_tail(&device->delete_list_entry, &delete_list);
2169 }
2170 }
2172 /* Process all new devices. */
2173 for (i = 0; i < num_new_devices; i++) {
2174 device = new_device_list[i];
2175 if (!device->new_device)
2176 continue;
2177 if (device->volume_offline)
2178 continue;
2179 list_add_tail(&device->scsi_device_list_entry,
2180 &ctrl_info->scsi_device_list);
2181 list_add_tail(&device->add_list_entry, &add_list);
2182 /* To prevent this device structure from being freed later. */
2183 device->keep_device = true;
2184 }
2186 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2188 /*
2189 * If OFA is in progress and there are devices that need to be deleted,
2190 * allow any pending reset operations to continue and unblock any SCSI
2191 * requests before removal.
2192 */
2193 if (pqi_ofa_in_progress(ctrl_info)) {
2194 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2195 if (pqi_is_device_added(device))
2196 pqi_device_remove_start(device);
2197 pqi_ctrl_unblock_device_reset(ctrl_info);
2198 pqi_scsi_unblock_requests(ctrl_info);
2199 }
2201 /* Remove all devices that have gone away. */
2202 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2203 if (device->volume_offline) {
2204 pqi_dev_info(ctrl_info, "offline", device);
2205 pqi_show_volume_status(ctrl_info, device);
2206 } else {
2207 pqi_dev_info(ctrl_info, "removed", device);
2208 }
2209 if (pqi_is_device_added(device))
2210 pqi_remove_device(ctrl_info, device);
2211 list_del(&device->delete_list_entry);
2212 pqi_free_device(device);
2213 }
2215 /*
2216 * Notify the SCSI ML if the queue depth of any existing device has
2217 * changed.
2218 */
2219 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2220 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2221 device->advertised_queue_depth = device->queue_depth;
2222 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2223 if (device->rescan) {
2224 scsi_rescan_device(&device->sdev->sdev_gendev);
2225 device->rescan = false;
2226 }
2227 }
2228 }
2230 /* Expose any new devices. */
2231 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2232 if (!pqi_is_device_added(device)) {
2233 rc = pqi_add_device(ctrl_info, device);
2234 if (rc == 0) {
2235 pqi_dev_info(ctrl_info, "added", device);
2236 } else {
2237 dev_warn(&ctrl_info->pci_dev->dev,
2238 "scsi %d:%d:%d:%d addition failed, device not added\n",
2239 ctrl_info->scsi_host->host_no,
2240 device->bus, device->target,
2241 device->lun);
2242 pqi_fixup_botched_add(ctrl_info, device);
2243 }
2244 }
2245 }
2246 }
2248 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2251 * Only support the HBA controller itself as a RAID
2252 * controller. If it's a RAID controller other than
2253 * the HBA itself (an external RAID controller, for
2254 * example), we don't support it.
2255 */
2256 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2257 !pqi_is_hba_lunid(device->scsi3addr))
2258 return false;
2260 return true;
2261 }
2263 static inline bool pqi_skip_device(u8 *scsi3addr)
2265 /* Ignore all masked devices. */
2266 if (MASKED_DEVICE(scsi3addr))
2267 return true;
2269 return false;
2270 }
2272 static inline void pqi_mask_device(u8 *scsi3addr)
2273 {
2274 scsi3addr[3] |= 0xc0;
2275 }
2277 static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
2279 switch (device->device_type) {
2280 case SA_DEVICE_TYPE_SAS:
2281 case SA_DEVICE_TYPE_EXPANDER_SMP:
2282 case SA_DEVICE_TYPE_SES:
2283 return true;
2284 }
2286 return false;
2287 }
2289 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2291 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2294 static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
2295 struct pqi_scsi_dev *device, struct report_phys_lun_16byte_wwid *phys_lun)
2297 if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
2298 ctrl_info->rpl_extended_format_4_5_supported ||
2299 pqi_is_device_with_sas_address(device))
2300 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2301 else
2302 memcpy(&device->wwid[8], device->page_83_identifier, 8);
2303 }
2305 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2306 {
2307 int i;
2308 int rc;
2309 LIST_HEAD(new_device_list_head);
2310 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2311 struct report_log_lun_list *logdev_list = NULL;
2312 struct report_phys_lun_16byte_wwid *phys_lun;
2313 struct report_log_lun *log_lun;
2314 struct bmic_identify_physical_device *id_phys = NULL;
2315 u32 num_physicals;
2316 u32 num_logicals;
2317 struct pqi_scsi_dev **new_device_list = NULL;
2318 struct pqi_scsi_dev *device;
2319 struct pqi_scsi_dev *next;
2320 unsigned int num_new_devices;
2321 unsigned int num_valid_devices;
2322 bool is_physical_device;
2323 u8 *scsi3addr;
2324 unsigned int physical_index;
2325 unsigned int logical_index;
2326 static char *out_of_memory_msg =
2327 "failed to allocate memory, device discovery stopped";
2329 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2330 if (rc)
2331 goto out;
2333 if (physdev_list)
2334 num_physicals =
2335 get_unaligned_be32(&physdev_list->header.list_length)
2336 / sizeof(physdev_list->lun_entries[0]);
2337 else
2338 num_physicals = 0;
2340 if (logdev_list)
2341 num_logicals =
2342 get_unaligned_be32(&logdev_list->header.list_length)
2343 / sizeof(logdev_list->lun_entries[0]);
2344 else
2345 num_logicals = 0;
2347 if (num_physicals) {
2349 * We need this buffer for calls to pqi_get_physical_disk_info()
2350 * below. We allocate it here instead of inside
2351 * pqi_get_physical_disk_info() because it's a fairly large
2352 * buffer.
2353 */
2354 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2355 if (!id_phys) {
2356 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2357 out_of_memory_msg);
2358 rc = -ENOMEM;
2359 goto out;
2360 }
2362 if (pqi_hide_vsep) {
2363 for (i = num_physicals - 1; i >= 0; i--) {
2364 phys_lun = &physdev_list->lun_entries[i];
2365 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2366 pqi_mask_device(phys_lun->lunid);
2367 break;
2368 }
2369 }
2370 }
2371 }
2373 if (num_logicals &&
2374 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2375 ctrl_info->lv_drive_type_mix_valid = true;
2377 num_new_devices = num_physicals + num_logicals;
2379 new_device_list = kmalloc_array(num_new_devices,
2380 sizeof(*new_device_list),
2381 GFP_KERNEL);
2382 if (!new_device_list) {
2383 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2384 rc = -ENOMEM;
2385 goto out;
2386 }
2388 for (i = 0; i < num_new_devices; i++) {
2389 device = kzalloc(sizeof(*device), GFP_KERNEL);
2390 if (!device) {
2391 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2392 out_of_memory_msg);
2393 rc = -ENOMEM;
2394 goto out;
2395 }
2396 list_add_tail(&device->new_device_list_entry,
2397 &new_device_list_head);
2398 }
2400 device = NULL;
2401 num_valid_devices = 0;
2402 physical_index = 0;
2403 logical_index = 0;
2405 for (i = 0; i < num_new_devices; i++) {
2407 if ((!pqi_expose_ld_first && i < num_physicals) ||
2408 (pqi_expose_ld_first && i >= num_logicals)) {
2409 is_physical_device = true;
2410 phys_lun = &physdev_list->lun_entries[physical_index++];
2411 log_lun = NULL;
2412 scsi3addr = phys_lun->lunid;
2413 } else {
2414 is_physical_device = false;
2415 phys_lun = NULL;
2416 log_lun = &logdev_list->lun_entries[logical_index++];
2417 scsi3addr = log_lun->lunid;
2418 }
2420 if (is_physical_device && pqi_skip_device(scsi3addr))
2421 continue;
2423 if (device)
2424 device = list_next_entry(device, new_device_list_entry);
2425 else
2426 device = list_first_entry(&new_device_list_head,
2427 struct pqi_scsi_dev, new_device_list_entry);
2429 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2430 device->is_physical_device = is_physical_device;
2431 if (is_physical_device) {
2432 device->device_type = phys_lun->device_type;
2433 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2434 device->is_expander_smp_device = true;
2435 } else {
2436 device->is_external_raid_device =
2437 pqi_is_external_raid_addr(scsi3addr);
2438 }
2440 if (!pqi_is_supported_device(device))
2441 continue;
2443 /* Do not present disks that the OS cannot fully probe */
2444 if (pqi_keep_device_offline(ctrl_info, device))
2445 continue;
2447 /* Gather information about the device. */
2448 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2449 if (rc == -ENOMEM) {
2450 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2451 out_of_memory_msg);
2452 goto out;
2453 }
2454 if (rc) {
2455 if (device->is_physical_device)
2456 dev_warn(&ctrl_info->pci_dev->dev,
2457 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2458 get_unaligned_be64(&phys_lun->wwid[0]),
2459 get_unaligned_be64(&phys_lun->wwid[8]));
2460 else
2461 dev_warn(&ctrl_info->pci_dev->dev,
2462 "obtaining device info failed, skipping logical device %08x%08x\n",
2463 *((u32 *)&device->scsi3addr),
2464 *((u32 *)&device->scsi3addr[4]));
2465 rc = 0;
2466 continue;
2467 }
2469 pqi_assign_bus_target_lun(device);
2471 if (device->is_physical_device) {
2472 pqi_set_physical_device_wwid(ctrl_info, device, phys_lun);
2473 if ((phys_lun->device_flags &
2474 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2475 phys_lun->aio_handle) {
2476 device->aio_enabled = true;
2477 device->aio_handle =
2478 phys_lun->aio_handle;
2479 }
2480 } else {
2481 memcpy(device->volume_id, log_lun->volume_id,
2482 sizeof(device->volume_id));
2483 }
2485 if (pqi_is_device_with_sas_address(device))
2486 device->sas_address = get_unaligned_be64(&device->wwid[8]);
2488 new_device_list[num_valid_devices++] = device;
2491 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2493 out:
2494 list_for_each_entry_safe(device, next, &new_device_list_head,
2495 new_device_list_entry) {
2496 if (device->keep_device)
2497 continue;
2498 list_del(&device->new_device_list_entry);
2499 pqi_free_device(device);
2500 }
2502 kfree(new_device_list);
2503 kfree(physdev_list);
2504 kfree(logdev_list);
2505 kfree(id_phys);
2507 return rc;
2508 }
2510 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2512 unsigned long flags;
2513 struct pqi_scsi_dev *device;
2514 struct pqi_scsi_dev *next;
2516 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2518 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2519 scsi_device_list_entry) {
2520 if (pqi_is_device_added(device))
2521 pqi_remove_device(ctrl_info, device);
2522 list_del(&device->scsi_device_list_entry);
2523 pqi_free_device(device);
2524 }
2526 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2527 }
2529 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2530 {
2531 int rc;
2532 int mutex_acquired;
2534 if (pqi_ctrl_offline(ctrl_info))
2535 return -ENXIO;
2537 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2539 if (!mutex_acquired) {
2540 if (pqi_ctrl_scan_blocked(ctrl_info))
2541 return -EBUSY;
2542 pqi_schedule_rescan_worker_delayed(ctrl_info);
2543 return -EINPROGRESS;
2544 }
2546 rc = pqi_update_scsi_devices(ctrl_info);
2547 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2548 pqi_schedule_rescan_worker_delayed(ctrl_info);
2550 mutex_unlock(&ctrl_info->scan_mutex);
2552 return rc;
2553 }
2555 static void pqi_scan_start(struct Scsi_Host *shost)
2557 struct pqi_ctrl_info *ctrl_info;
2559 ctrl_info = shost_to_hba(shost);
2561 pqi_scan_scsi_devices(ctrl_info);
2562 }
2564 /* Returns TRUE if scan is finished. */
2566 static int pqi_scan_finished(struct Scsi_Host *shost,
2567 unsigned long elapsed_time)
2569 struct pqi_ctrl_info *ctrl_info;
2571 ctrl_info = shost_priv(shost);
2573 return !mutex_is_locked(&ctrl_info->scan_mutex);
2574 }
2576 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2577 struct raid_map *raid_map, u64 first_block)
2579 u32 volume_blk_size;
2581 /*
2582 * Set the encryption tweak values based on logical block address.
2583 * If the block size is 512, the tweak value is equal to the LBA.
2584 * For other block sizes, tweak value is (LBA * block size) / 512.
2585 */
2586 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2587 if (volume_blk_size != 512)
2588 first_block = (first_block * volume_blk_size) / 512;
2590 encryption_info->data_encryption_key_index =
2591 get_unaligned_le16(&raid_map->data_encryption_key_index);
2592 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2593 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2594 }
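/*
 * Worked example of the tweak computation above (numbers chosen for
 * illustration only): on a volume with 4096-byte blocks, an I/O at
 * LBA 100 yields a tweak of (100 * 4096) / 512 = 800; with 512-byte
 * blocks the tweak is simply 100. The 64-bit result is then split
 * into the lower/upper 32-bit tweak fields consumed by the controller.
 */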
2596 /*
2597 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2598 */
2600 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2601 struct pqi_scsi_dev_raid_map_data *rmd)
2602 {
2603 bool is_supported = true;
2605 switch (rmd->raid_level) {
2606 case SA_RAID_0:
2607 break;
2608 case SA_RAID_1:
2609 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2610 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2611 is_supported = false;
2612 break;
2613 case SA_RAID_TRIPLE:
2614 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2615 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2616 is_supported = false;
2617 break;
2618 case SA_RAID_5:
2619 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2620 rmd->data_length > ctrl_info->max_write_raid_5_6))
2621 is_supported = false;
2622 break;
2623 case SA_RAID_6:
2624 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2625 rmd->data_length > ctrl_info->max_write_raid_5_6))
2626 is_supported = false;
2627 break;
2628 default:
2629 is_supported = false;
2630 break;
2631 }
2633 return is_supported;
2634 }
2636 #define PQI_RAID_BYPASS_INELIGIBLE 1
2638 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2639 struct pqi_scsi_dev_raid_map_data *rmd)
2640 {
2641 /* Check for valid opcode, get LBA and block count. */
2642 switch (scmd->cmnd[0]) {
2643 case WRITE_6:
2644 rmd->is_write = true;
2645 fallthrough;
2646 case READ_6:
2647 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2648 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2649 rmd->block_cnt = (u32)scmd->cmnd[4];
2650 if (rmd->block_cnt == 0)
2651 rmd->block_cnt = 256;
2652 break;
2653 case WRITE_10:
2654 rmd->is_write = true;
2655 fallthrough;
2656 case READ_10:
2657 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2658 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2659 break;
2660 case WRITE_12:
2661 rmd->is_write = true;
2662 fallthrough;
2663 case READ_12:
2664 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2665 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2666 break;
2667 case WRITE_16:
2668 rmd->is_write = true;
2669 fallthrough;
2670 case READ_16:
2671 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2672 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2673 break;
2674 default:
2675 /* Process via normal I/O path. */
2676 return PQI_RAID_BYPASS_INELIGIBLE;
2677 }
2679 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2681 return 0;
2682 }
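/*
 * Illustrative CDB decode (derived from the switch above): a READ_10
 * of 8 blocks at LBA 0x12345678 carries the LBA big-endian in bytes
 * 2-5 and the block count in bytes 7-8, so first_block = 0x12345678
 * and block_cnt = 8. For READ_6/WRITE_6 a transfer-length byte of 0
 * means 256 blocks per SBC, which is why block_cnt is patched above.
 */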
2684 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2685 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2686 {
2687 #if BITS_PER_LONG == 32
2688 u64 tmpdiv;
2689 #endif
2691 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2693 /* Check for invalid block or wraparound. */
2694 if (rmd->last_block >=
2695 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2696 rmd->last_block < rmd->first_block)
2697 return PQI_RAID_BYPASS_INELIGIBLE;
2699 rmd->data_disks_per_row =
2700 get_unaligned_le16(&raid_map->data_disks_per_row);
2701 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2702 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2704 /* Calculate stripe information for the request. */
2705 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2706 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2707 return PQI_RAID_BYPASS_INELIGIBLE;
2708 #if BITS_PER_LONG == 32
2709 tmpdiv = rmd->first_block;
2710 do_div(tmpdiv, rmd->blocks_per_row);
2711 rmd->first_row = tmpdiv;
2712 tmpdiv = rmd->last_block;
2713 do_div(tmpdiv, rmd->blocks_per_row);
2714 rmd->last_row = tmpdiv;
2715 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2716 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2717 tmpdiv = rmd->first_row_offset;
2718 do_div(tmpdiv, rmd->strip_size);
2719 rmd->first_column = tmpdiv;
2720 tmpdiv = rmd->last_row_offset;
2721 do_div(tmpdiv, rmd->strip_size);
2722 rmd->last_column = tmpdiv;
2723 #else
2724 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2725 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2726 rmd->first_row_offset = (u32)(rmd->first_block -
2727 (rmd->first_row * rmd->blocks_per_row));
2728 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2729 rmd->blocks_per_row));
2730 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2731 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2732 #endif
2734 /* If this isn't a single row/column then give to the controller. */
2735 if (rmd->first_row != rmd->last_row ||
2736 rmd->first_column != rmd->last_column)
2737 return PQI_RAID_BYPASS_INELIGIBLE;
2739 /* Proceeding with driver mapping. */
2740 rmd->total_disks_per_row = rmd->data_disks_per_row +
2741 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2742 rmd->map_row = ((u32)(rmd->first_row >>
2743 raid_map->parity_rotation_shift)) %
2744 get_unaligned_le16(&raid_map->row_cnt);
2745 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2746 rmd->first_column;
2748 return 0;
2749 }
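/*
 * Illustrative stripe math for the function above (numbers chosen for
 * this example only): with 4 data disks and strip_size = 128 blocks,
 * blocks_per_row = 512. An 8-block request at first_block = 1000 gives
 * first_row = last_row = 1, first_row_offset = 488, and first_column =
 * last_column = 488 / 128 = 3, so the request stays in one row/column
 * and remains eligible for bypass.
 */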
2751 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2752 struct raid_map *raid_map)
2753 {
2754 #if BITS_PER_LONG == 32
2755 u64 tmpdiv;
2756 #endif
2758 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2759 return PQI_RAID_BYPASS_INELIGIBLE;
2762 /* Verify first and last block are in same RAID group. */
2763 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2764 #if BITS_PER_LONG == 32
2765 tmpdiv = rmd->first_block;
2766 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2767 tmpdiv = rmd->first_group;
2768 do_div(tmpdiv, rmd->blocks_per_row);
2769 rmd->first_group = tmpdiv;
2770 tmpdiv = rmd->last_block;
2771 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2772 tmpdiv = rmd->last_group;
2773 do_div(tmpdiv, rmd->blocks_per_row);
2774 rmd->last_group = tmpdiv;
2775 #else
2776 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2777 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2778 #endif
2779 if (rmd->first_group != rmd->last_group)
2780 return PQI_RAID_BYPASS_INELIGIBLE;
2782 /* Verify request is in a single row of RAID 5/6. */
2783 #if BITS_PER_LONG == 32
2784 tmpdiv = rmd->first_block;
2785 do_div(tmpdiv, rmd->stripesize);
2786 rmd->first_row = tmpdiv;
2787 rmd->r5or6_first_row = tmpdiv;
2788 tmpdiv = rmd->last_block;
2789 do_div(tmpdiv, rmd->stripesize);
2790 rmd->r5or6_last_row = tmpdiv;
2791 #else
2792 rmd->first_row = rmd->r5or6_first_row =
2793 rmd->first_block / rmd->stripesize;
2794 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2795 #endif
2796 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2797 return PQI_RAID_BYPASS_INELIGIBLE;
2799 /* Verify request is in a single column. */
2800 #if BITS_PER_LONG == 32
2801 tmpdiv = rmd->first_block;
2802 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2803 tmpdiv = rmd->first_row_offset;
2804 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2805 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2806 tmpdiv = rmd->last_block;
2807 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2808 tmpdiv = rmd->r5or6_last_row_offset;
2809 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2810 tmpdiv = rmd->r5or6_first_row_offset;
2811 do_div(tmpdiv, rmd->strip_size);
2812 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2813 tmpdiv = rmd->r5or6_last_row_offset;
2814 do_div(tmpdiv, rmd->strip_size);
2815 rmd->r5or6_last_column = tmpdiv;
2816 #else
2817 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2818 (u32)((rmd->first_block % rmd->stripesize) %
2819 rmd->blocks_per_row);
2821 rmd->r5or6_last_row_offset =
2822 (u32)((rmd->last_block % rmd->stripesize) %
2823 rmd->blocks_per_row);
2825 rmd->first_column =
2826 rmd->r5or6_first_row_offset / rmd->strip_size;
2827 rmd->r5or6_first_column = rmd->first_column;
2828 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2829 #endif
2830 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2831 return PQI_RAID_BYPASS_INELIGIBLE;
2833 /* Request is eligible. */
2834 rmd->map_row =
2835 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2836 get_unaligned_le16(&raid_map->row_cnt);
2838 rmd->map_index = (rmd->first_group *
2839 (get_unaligned_le16(&raid_map->row_cnt) *
2840 rmd->total_disks_per_row)) +
2841 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2843 if (rmd->is_write) {
2844 u32 index;
2846 /*
2847 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2848 * parity entries inside the device's raid_map.
2849 *
2850 * A device's RAID map is bounded by: number of RAID disks squared.
2851 *
2852 * The devices RAID map size is checked during device
2853 * initialization.
2854 */
2855 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2856 index *= rmd->total_disks_per_row;
2857 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2859 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2860 if (rmd->raid_level == SA_RAID_6) {
2861 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2862 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2863 }
2864 #if BITS_PER_LONG == 32
2865 tmpdiv = rmd->first_block;
2866 do_div(tmpdiv, rmd->blocks_per_row);
2867 rmd->row = tmpdiv;
2868 #else
2869 rmd->row = rmd->first_block / rmd->blocks_per_row;
2870 #endif
2871 }
2873 return 0;
2874 }
2876 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2877 {
2878 /* Build the new CDB for the physical disk I/O. */
2879 if (rmd->disk_block > 0xffffffff) {
2880 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2881 rmd->cdb[1] = 0;
2882 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2883 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2884 rmd->cdb[14] = 0;
2885 rmd->cdb[15] = 0;
2886 rmd->cdb_length = 16;
2887 } else {
2888 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2889 rmd->cdb[1] = 0;
2890 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2891 rmd->cdb[6] = 0;
2892 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2893 rmd->cdb[9] = 0;
2894 rmd->cdb_length = 10;
2895 }
2896 }
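/*
 * The 0xffffffff threshold above mirrors SBC addressing limits: a
 * 10-byte CDB carries only a 32-bit LBA and a 16-bit transfer length,
 * so any disk_block beyond 32 bits must use READ_16/WRITE_16. The
 * caller rejects bypass earlier when disk_block_cnt exceeds 0xffff,
 * keeping the 10-byte form safe here.
 */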
2898 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2899 struct pqi_scsi_dev_raid_map_data *rmd)
2900 {
2901 u32 index;
2902 u32 group;
2904 group = rmd->map_index / rmd->data_disks_per_row;
2906 index = rmd->map_index - (group * rmd->data_disks_per_row);
2907 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2908 index += rmd->data_disks_per_row;
2909 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2910 if (rmd->layout_map_count > 2) {
2911 index += rmd->data_disks_per_row;
2912 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2913 }
2915 rmd->num_it_nexus_entries = rmd->layout_map_count;
2916 }
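/*
 * Example layout (illustrative, assuming data_disks_per_row = 1 for a
 * 3-way RAID-1 volume with layout_map_count = 3): map_index 0 yields
 * group 0 and index 0, so it_nexus[0..2] become the AIO handles of the
 * three mirror legs, taken from consecutive rows of raid_map->disk_data.
 */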
2918 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2919 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2920 struct pqi_queue_group *queue_group)
2921 {
2922 int rc;
2923 struct raid_map *raid_map;
2924 u32 group;
2925 u32 next_bypass_group;
2926 struct pqi_encryption_info *encryption_info_ptr;
2927 struct pqi_encryption_info encryption_info;
2928 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2930 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2931 if (rc)
2932 return PQI_RAID_BYPASS_INELIGIBLE;
2934 rmd.raid_level = device->raid_level;
2936 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2937 return PQI_RAID_BYPASS_INELIGIBLE;
2939 if (unlikely(rmd.block_cnt == 0))
2940 return PQI_RAID_BYPASS_INELIGIBLE;
2942 raid_map = device->raid_map;
2944 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2945 if (rc)
2946 return PQI_RAID_BYPASS_INELIGIBLE;
2948 if (device->raid_level == SA_RAID_1 ||
2949 device->raid_level == SA_RAID_TRIPLE) {
2950 if (rmd.is_write) {
2951 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2952 } else {
2953 group = device->next_bypass_group;
2954 next_bypass_group = group + 1;
2955 if (next_bypass_group >= rmd.layout_map_count)
2956 next_bypass_group = 0;
2957 device->next_bypass_group = next_bypass_group;
2958 rmd.map_index += group * rmd.data_disks_per_row;
2959 }
2960 } else if ((device->raid_level == SA_RAID_5 ||
2961 device->raid_level == SA_RAID_6) &&
2962 (rmd.layout_map_count > 1 || rmd.is_write)) {
2963 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2964 if (rc)
2965 return PQI_RAID_BYPASS_INELIGIBLE;
2966 }
2968 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2969 return PQI_RAID_BYPASS_INELIGIBLE;
2971 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2972 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2973 rmd.first_row * rmd.strip_size +
2974 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2975 rmd.disk_block_cnt = rmd.block_cnt;
2977 /* Handle differing logical/physical block sizes. */
2978 if (raid_map->phys_blk_shift) {
2979 rmd.disk_block <<= raid_map->phys_blk_shift;
2980 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2981 }
2983 if (unlikely(rmd.disk_block_cnt > 0xffff))
2984 return PQI_RAID_BYPASS_INELIGIBLE;
2986 pqi_set_aio_cdb(&rmd);
2988 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2989 if (rmd.data_length > device->max_transfer_encrypted)
2990 return PQI_RAID_BYPASS_INELIGIBLE;
2991 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2992 encryption_info_ptr = &encryption_info;
2993 } else {
2994 encryption_info_ptr = NULL;
2995 }
2997 if (rmd.is_write) {
2998 switch (device->raid_level) {
2999 case SA_RAID_1:
3000 case SA_RAID_TRIPLE:
3001 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3002 encryption_info_ptr, device, &rmd);
3003 case SA_RAID_5:
3004 case SA_RAID_6:
3005 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3006 encryption_info_ptr, device, &rmd);
3007 }
3008 }
3010 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3011 rmd.cdb, rmd.cdb_length, queue_group,
3012 encryption_info_ptr, true);
3013 }
3015 #define PQI_STATUS_IDLE 0x0
3017 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3018 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3020 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3021 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3022 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3023 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3024 #define PQI_DEVICE_STATE_ERROR 0x4
3026 #define PQI_MODE_READY_TIMEOUT_SECS 30
3027 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
3029 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3030 {
3031 struct pqi_device_registers __iomem *pqi_registers;
3032 unsigned long timeout;
3033 u64 signature;
3034 u8 status;
3036 pqi_registers = ctrl_info->pqi_registers;
3037 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
3039 while (1) {
3040 signature = readq(&pqi_registers->signature);
3041 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3042 sizeof(signature)) == 0)
3043 break;
3044 if (time_after(jiffies, timeout)) {
3045 dev_err(&ctrl_info->pci_dev->dev,
3046 "timed out waiting for PQI signature\n");
3047 return -ETIMEDOUT;
3048 }
3049 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3050 }
3052 while (1) {
3053 status = readb(&pqi_registers->function_and_status_code);
3054 if (status == PQI_STATUS_IDLE)
3055 break;
3056 if (time_after(jiffies, timeout)) {
3057 dev_err(&ctrl_info->pci_dev->dev,
3058 "timed out waiting for PQI IDLE\n");
3059 return -ETIMEDOUT;
3060 }
3061 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3062 }
3064 while (1) {
3065 if (readl(&pqi_registers->device_status) ==
3066 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3067 break;
3068 if (time_after(jiffies, timeout)) {
3069 dev_err(&ctrl_info->pci_dev->dev,
3070 "timed out waiting for PQI all registers ready\n");
3071 return -ETIMEDOUT;
3072 }
3073 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3074 }
3076 return 0;
3077 }
3079 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3081 struct pqi_scsi_dev *device;
3083 device = io_request->scmd->device->hostdata;
3084 device->raid_bypass_enabled = false;
3085 device->aio_enabled = false;
3088 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3090 struct pqi_ctrl_info *ctrl_info;
3091 struct pqi_scsi_dev *device;
3093 device = sdev->hostdata;
3094 if (device->device_offline)
3095 return;
3097 device->device_offline = true;
3098 ctrl_info = shost_to_hba(sdev->host);
3099 pqi_schedule_rescan_worker(ctrl_info);
3100 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3101 path, ctrl_info->scsi_host->host_no, device->bus,
3102 device->target, device->lun);
3103 }
3105 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3107 u8 scsi_status;
3108 u8 host_byte;
3109 struct scsi_cmnd *scmd;
3110 struct pqi_raid_error_info *error_info;
3111 size_t sense_data_length;
3112 int residual_count;
3113 int xfer_count;
3114 struct scsi_sense_hdr sshdr;
3116 scmd = io_request->scmd;
3117 if (!scmd)
3118 return;
3120 error_info = io_request->error_info;
3121 scsi_status = error_info->status;
3122 host_byte = DID_OK;
3124 switch (error_info->data_out_result) {
3125 case PQI_DATA_IN_OUT_GOOD:
3126 break;
3127 case PQI_DATA_IN_OUT_UNDERFLOW:
3128 xfer_count =
3129 get_unaligned_le32(&error_info->data_out_transferred);
3130 residual_count = scsi_bufflen(scmd) - xfer_count;
3131 scsi_set_resid(scmd, residual_count);
3132 if (xfer_count < scmd->underflow)
3133 host_byte = DID_SOFT_ERROR;
3134 break;
3135 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3136 case PQI_DATA_IN_OUT_ABORTED:
3137 host_byte = DID_ABORT;
3138 break;
3139 case PQI_DATA_IN_OUT_TIMEOUT:
3140 host_byte = DID_TIME_OUT;
3141 break;
3142 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3143 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3144 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3145 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3146 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3147 case PQI_DATA_IN_OUT_ERROR:
3148 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3149 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3150 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3151 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3152 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3153 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3154 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3155 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3156 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3157 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3158 default:
3159 host_byte = DID_ERROR;
3160 break;
3161 }
3163 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3164 if (sense_data_length == 0)
3165 sense_data_length =
3166 get_unaligned_le16(&error_info->response_data_length);
3167 if (sense_data_length) {
3168 if (sense_data_length > sizeof(error_info->data))
3169 sense_data_length = sizeof(error_info->data);
3171 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3172 scsi_normalize_sense(error_info->data,
3173 sense_data_length, &sshdr) &&
3174 sshdr.sense_key == HARDWARE_ERROR &&
3175 sshdr.asc == 0x3e) {
3176 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3177 struct pqi_scsi_dev *device = scmd->device->hostdata;
3179 switch (sshdr.ascq) {
3180 case 0x1: /* LOGICAL UNIT FAILURE */
3181 if (printk_ratelimit())
3182 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3183 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3184 pqi_take_device_offline(scmd->device, "RAID");
3185 host_byte = DID_NO_CONNECT;
3186 break;
3188 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3189 if (printk_ratelimit())
3190 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3191 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3192 break;
3193 }
3194 }
3196 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3197 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3198 memcpy(scmd->sense_buffer, error_info->data,
3199 sense_data_length);
3200 }
3202 scmd->result = scsi_status;
3203 set_host_byte(scmd, host_byte);
3204 }
3206 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3208 u8 scsi_status;
3209 u8 host_byte;
3210 struct scsi_cmnd *scmd;
3211 struct pqi_aio_error_info *error_info;
3212 size_t sense_data_length;
3213 int residual_count;
3214 int xfer_count;
3215 bool device_offline;
3217 scmd = io_request->scmd;
3218 error_info = io_request->error_info;
3219 host_byte = DID_OK;
3220 sense_data_length = 0;
3221 device_offline = false;
3223 switch (error_info->service_response) {
3224 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3225 scsi_status = error_info->status;
3226 break;
3227 case PQI_AIO_SERV_RESPONSE_FAILURE:
3228 switch (error_info->status) {
3229 case PQI_AIO_STATUS_IO_ABORTED:
3230 scsi_status = SAM_STAT_TASK_ABORTED;
3231 break;
3232 case PQI_AIO_STATUS_UNDERRUN:
3233 scsi_status = SAM_STAT_GOOD;
3234 residual_count = get_unaligned_le32(
3235 &error_info->residual_count);
3236 scsi_set_resid(scmd, residual_count);
3237 xfer_count = scsi_bufflen(scmd) - residual_count;
3238 if (xfer_count < scmd->underflow)
3239 host_byte = DID_SOFT_ERROR;
3240 break;
3241 case PQI_AIO_STATUS_OVERRUN:
3242 scsi_status = SAM_STAT_GOOD;
3243 break;
3244 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3245 pqi_aio_path_disabled(io_request);
3246 scsi_status = SAM_STAT_GOOD;
3247 io_request->status = -EAGAIN;
3248 break;
3249 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3250 case PQI_AIO_STATUS_INVALID_DEVICE:
3251 if (!io_request->raid_bypass) {
3252 device_offline = true;
3253 pqi_take_device_offline(scmd->device, "AIO");
3254 host_byte = DID_NO_CONNECT;
3255 }
3256 scsi_status = SAM_STAT_CHECK_CONDITION;
3257 break;
3258 case PQI_AIO_STATUS_IO_ERROR:
3259 default:
3260 scsi_status = SAM_STAT_CHECK_CONDITION;
3261 break;
3262 }
3263 break;
3264 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3265 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3266 scsi_status = SAM_STAT_GOOD;
3267 break;
3268 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3269 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3270 default:
3271 scsi_status = SAM_STAT_CHECK_CONDITION;
3272 break;
3273 }
3275 if (error_info->data_present) {
3276 sense_data_length =
3277 get_unaligned_le16(&error_info->data_length);
3278 if (sense_data_length) {
3279 if (sense_data_length > sizeof(error_info->data))
3280 sense_data_length = sizeof(error_info->data);
3281 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3282 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3283 memcpy(scmd->sense_buffer, error_info->data,
3284 sense_data_length);
3285 }
3286 }
3288 if (device_offline && sense_data_length == 0)
3289 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3291 scmd->result = scsi_status;
3292 set_host_byte(scmd, host_byte);
3293 }
3295 static void pqi_process_io_error(unsigned int iu_type,
3296 struct pqi_io_request *io_request)
3297 {
3298 switch (iu_type) {
3299 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3300 pqi_process_raid_io_error(io_request);
3301 break;
3302 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3303 pqi_process_aio_io_error(io_request);
3304 break;
3305 }
3306 }
3308 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3309 struct pqi_task_management_response *response)
3310 {
3311 int rc;
3313 switch (response->response_code) {
3314 case SOP_TMF_COMPLETE:
3315 case SOP_TMF_FUNCTION_SUCCEEDED:
3316 rc = 0;
3317 break;
3318 case SOP_TMF_REJECTED:
3319 rc = -EAGAIN;
3320 break;
3321 default:
3322 rc = -EIO;
3323 break;
3324 }
3326 if (rc)
3327 dev_err(&ctrl_info->pci_dev->dev,
3328 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3330 return rc;
3331 }
3333 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3334 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3336 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3337 }
3339 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3340 {
3341 int num_responses;
3342 pqi_index_t oq_pi;
3343 pqi_index_t oq_ci;
3344 struct pqi_io_request *io_request;
3345 struct pqi_io_response *response;
3346 u16 request_id;
3348 num_responses = 0;
3349 oq_ci = queue_group->oq_ci_copy;
3351 while (1) {
3352 oq_pi = readl(queue_group->oq_pi);
3353 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3354 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3355 dev_err(&ctrl_info->pci_dev->dev,
3356 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3357 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3358 return -1;
3359 }
3360 if (oq_pi == oq_ci)
3361 break;
3363 num_responses++;
3364 response = queue_group->oq_element_array +
3365 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3367 request_id = get_unaligned_le16(&response->request_id);
3368 if (request_id >= ctrl_info->max_io_slots) {
3369 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3370 dev_err(&ctrl_info->pci_dev->dev,
3371 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3372 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3373 return -1;
3374 }
3376 io_request = &ctrl_info->io_request_pool[request_id];
3377 if (atomic_read(&io_request->refcount) == 0) {
3378 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3379 dev_err(&ctrl_info->pci_dev->dev,
3380 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3381 request_id, oq_pi, oq_ci);
3382 return -1;
3383 }
3385 switch (response->header.iu_type) {
3386 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3387 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3388 if (io_request->scmd)
3389 io_request->scmd->result = 0;
3390 fallthrough;
3391 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3392 break;
3393 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3394 io_request->status =
3395 get_unaligned_le16(
3396 &((struct pqi_vendor_general_response *)response)->status);
3397 break;
3398 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3399 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3400 (void *)response);
3401 break;
3402 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3403 pqi_aio_path_disabled(io_request);
3404 io_request->status = -EAGAIN;
3405 break;
3406 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3407 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3408 io_request->error_info = ctrl_info->error_buffer +
3409 (get_unaligned_le16(&response->error_index) *
3410 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3411 pqi_process_io_error(response->header.iu_type, io_request);
3412 break;
3413 default:
3414 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3415 dev_err(&ctrl_info->pci_dev->dev,
3416 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3417 response->header.iu_type, oq_pi, oq_ci);
3418 return -1;
3419 }
3421 io_request->io_complete_callback(io_request, io_request->context);
3423 /*
3424 * Note that the I/O request structure CANNOT BE TOUCHED after
3425 * returning from the I/O completion callback!
3426 */
3427 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3428 }
3430 if (num_responses) {
3431 queue_group->oq_ci_copy = oq_ci;
3432 writel(oq_ci, queue_group->oq_ci);
3435 return num_responses;
3436 }
3438 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3439 unsigned int ci, unsigned int elements_in_queue)
3441 unsigned int num_elements_used;
3443 if (pi >= ci)
3444 num_elements_used = pi - ci;
3445 else
3446 num_elements_used = elements_in_queue - ci + pi;
3448 return elements_in_queue - num_elements_used - 1;
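/*
 * Worked example: for a 16-element queue with pi = 3 and ci = 14, the
 * used count wraps: 16 - 14 + 3 = 5 elements in flight, leaving
 * 16 - 5 - 1 = 10 free. One slot is always sacrificed so that pi == ci
 * unambiguously means "empty" rather than "full".
 */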
3451 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3452 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3453 {
3454 pqi_index_t iq_pi;
3455 pqi_index_t iq_ci;
3456 unsigned long flags;
3457 void *next_element;
3458 struct pqi_queue_group *queue_group;
3460 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3461 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3463 while (1) {
3464 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3466 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3467 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3469 if (pqi_num_elements_free(iq_pi, iq_ci,
3470 ctrl_info->num_elements_per_iq))
3471 break;
3473 spin_unlock_irqrestore(
3474 &queue_group->submit_lock[RAID_PATH], flags);
3476 if (pqi_ctrl_offline(ctrl_info))
3477 return;
3478 }
3480 next_element = queue_group->iq_element_array[RAID_PATH] +
3481 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3483 memcpy(next_element, iu, iu_length);
3485 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3486 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3488 /*
3489 * This write notifies the controller that an IU is available to be
3490 * processed.
3491 */
3492 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3494 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3495 }
3497 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3498 struct pqi_event *event)
3500 struct pqi_event_acknowledge_request request;
3502 memset(&request, 0, sizeof(request));
3504 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3505 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3506 &request.header.iu_length);
3507 request.event_type = event->event_type;
3508 put_unaligned_le16(event->event_id, &request.event_id);
3509 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3511 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3512 }
3514 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3515 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3517 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3518 struct pqi_ctrl_info *ctrl_info)
3519 {
3520 u8 status;
3521 unsigned long timeout;
3523 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3525 while (1) {
3526 status = pqi_read_soft_reset_status(ctrl_info);
3527 if (status & PQI_SOFT_RESET_INITIATE)
3528 return RESET_INITIATE_DRIVER;
3530 if (status & PQI_SOFT_RESET_ABORT)
3531 return RESET_ABORT;
3533 if (!sis_is_firmware_running(ctrl_info))
3534 return RESET_NORESPONSE;
3536 if (time_after(jiffies, timeout)) {
3537 dev_warn(&ctrl_info->pci_dev->dev,
3538 "timed out waiting for soft reset status\n");
3539 return RESET_TIMEDOUT;
3542 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3543 }
3544 }
3546 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3547 {
3548 int rc;
3549 unsigned int delay_secs;
3550 enum pqi_soft_reset_status reset_status;
3552 if (ctrl_info->soft_reset_handshake_supported)
3553 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3554 else
3555 reset_status = RESET_INITIATE_FIRMWARE;
3557 delay_secs = PQI_POST_RESET_DELAY_SECS;
3559 switch (reset_status) {
3560 case RESET_TIMEDOUT:
3561 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3562 fallthrough;
3563 case RESET_INITIATE_DRIVER:
3564 dev_info(&ctrl_info->pci_dev->dev,
3565 "Online Firmware Activation: resetting controller\n");
3566 sis_soft_reset(ctrl_info);
3567 fallthrough;
3568 case RESET_INITIATE_FIRMWARE:
3569 ctrl_info->pqi_mode_enabled = false;
3570 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3571 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3572 pqi_ofa_free_host_buffer(ctrl_info);
3573 pqi_ctrl_ofa_done(ctrl_info);
3574 dev_info(&ctrl_info->pci_dev->dev,
3575 "Online Firmware Activation: %s\n",
3576 rc == 0 ? "SUCCESS" : "FAILED");
3577 break;
3578 case RESET_ABORT:
3579 dev_info(&ctrl_info->pci_dev->dev,
3580 "Online Firmware Activation ABORTED\n");
3581 if (ctrl_info->soft_reset_handshake_supported)
3582 pqi_clear_soft_reset_status(ctrl_info);
3583 pqi_ofa_free_host_buffer(ctrl_info);
3584 pqi_ctrl_ofa_done(ctrl_info);
3585 pqi_ofa_ctrl_unquiesce(ctrl_info);
3586 break;
3587 case RESET_NORESPONSE:
3588 fallthrough;
3589 default:
3590 dev_err(&ctrl_info->pci_dev->dev,
3591 "unexpected Online Firmware Activation reset status: 0x%x\n",
3593 pqi_ofa_free_host_buffer(ctrl_info);
3594 pqi_ctrl_ofa_done(ctrl_info);
3595 pqi_ofa_ctrl_unquiesce(ctrl_info);
3596 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3601 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3603 struct pqi_ctrl_info *ctrl_info;
3605 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3607 pqi_ctrl_ofa_start(ctrl_info);
3608 pqi_ofa_setup_host_buffer(ctrl_info);
3609 pqi_ofa_host_memory_update(ctrl_info);
3610 }
3612 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3614 struct pqi_ctrl_info *ctrl_info;
3615 struct pqi_event *event;
3617 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3619 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3621 pqi_ofa_ctrl_quiesce(ctrl_info);
3622 pqi_acknowledge_event(ctrl_info, event);
3623 pqi_process_soft_reset(ctrl_info);
3624 }
3626 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3627 struct pqi_event *event)
3628 {
3629 bool ack_event;
3631 ack_event = true;
3633 switch (event->event_id) {
3634 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3635 dev_info(&ctrl_info->pci_dev->dev,
3636 "received Online Firmware Activation memory allocation request\n");
3637 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3638 break;
3639 case PQI_EVENT_OFA_QUIESCE:
3640 dev_info(&ctrl_info->pci_dev->dev,
3641 "received Online Firmware Activation quiesce request\n");
3642 schedule_work(&ctrl_info->ofa_quiesce_work);
3643 ack_event = false;
3644 break;
3645 case PQI_EVENT_OFA_CANCELED:
3646 dev_info(&ctrl_info->pci_dev->dev,
3647 "received Online Firmware Activation cancel request: reason: %u\n",
3648 ctrl_info->ofa_cancel_reason);
3649 pqi_ofa_free_host_buffer(ctrl_info);
3650 pqi_ctrl_ofa_done(ctrl_info);
3651 break;
3652 default:
3653 dev_err(&ctrl_info->pci_dev->dev,
3654 "received unknown Online Firmware Activation request: event ID: %u\n",
3655 event->event_id);
3656 break;
3657 }
3659 return ack_event;
3660 }
3662 static void pqi_event_worker(struct work_struct *work)
3663 {
3664 unsigned int i;
3665 bool rescan_needed;
3666 struct pqi_ctrl_info *ctrl_info;
3667 struct pqi_event *event;
3668 bool ack_event;
3670 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3672 pqi_ctrl_busy(ctrl_info);
3673 pqi_wait_if_ctrl_blocked(ctrl_info);
3674 if (pqi_ctrl_offline(ctrl_info))
3675 goto out;
3677 rescan_needed = false;
3678 event = ctrl_info->events;
3679 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3680 if (event->pending) {
3681 event->pending = false;
3682 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3683 ack_event = pqi_ofa_process_event(ctrl_info, event);
3684 } else {
3685 ack_event = true;
3686 rescan_needed = true;
3687 }
3688 if (ack_event)
3689 pqi_acknowledge_event(ctrl_info, event);
3690 }
3691 event++;
3692 }
3694 if (rescan_needed)
3695 pqi_schedule_rescan_worker_delayed(ctrl_info);
3697 out:
3698 pqi_ctrl_unbusy(ctrl_info);
3699 }
3701 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
3703 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3704 {
3705 int num_interrupts;
3706 u32 heartbeat_count;
3707 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3709 pqi_check_ctrl_health(ctrl_info);
3710 if (pqi_ctrl_offline(ctrl_info))
3711 return;
3713 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3714 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3716 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3717 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3718 dev_err(&ctrl_info->pci_dev->dev,
3719 "no heartbeat detected - last heartbeat count: %u\n",
3721 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3725 ctrl_info->previous_num_interrupts = num_interrupts;
3728 ctrl_info->previous_heartbeat_count = heartbeat_count;
3729 mod_timer(&ctrl_info->heartbeat_timer,
3730 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3731 }
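/*
 * The check above deliberately requires *both* the interrupt count and
 * the firmware heartbeat counter to be stagnant across a full
 * PQI_HEARTBEAT_TIMER_INTERVAL (10 seconds) before declaring the
 * controller dead: an idle but healthy controller generates no
 * interrupts yet keeps incrementing its heartbeat counter.
 */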
3733 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3735 if (!ctrl_info->heartbeat_counter)
3736 return;
3738 ctrl_info->previous_num_interrupts =
3739 atomic_read(&ctrl_info->num_interrupts);
3740 ctrl_info->previous_heartbeat_count =
3741 pqi_read_heartbeat_counter(ctrl_info);
3743 ctrl_info->heartbeat_timer.expires =
3744 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3745 add_timer(&ctrl_info->heartbeat_timer);
3746 }
3748 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3750 del_timer_sync(&ctrl_info->heartbeat_timer);
3751 }
3753 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3754 struct pqi_event *event, struct pqi_event_response *response)
3756 switch (event->event_id) {
3757 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3758 ctrl_info->ofa_bytes_requested =
3759 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3760 break;
3761 case PQI_EVENT_OFA_CANCELED:
3762 ctrl_info->ofa_cancel_reason =
3763 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3764 break;
3765 }
3766 }
3768 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3769 {
3770 int num_events;
3771 pqi_index_t oq_pi;
3772 pqi_index_t oq_ci;
3773 struct pqi_event_queue *event_queue;
3774 struct pqi_event_response *response;
3775 struct pqi_event *event;
3776 int event_index;
3778 event_queue = &ctrl_info->event_queue;
3779 num_events = 0;
3780 oq_ci = event_queue->oq_ci_copy;
3782 while (1) {
3783 oq_pi = readl(event_queue->oq_pi);
3784 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3785 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3786 dev_err(&ctrl_info->pci_dev->dev,
3787 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3788 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3789 return -1;
3790 }
3792 if (oq_pi == oq_ci)
3793 break;
3795 num_events++;
3796 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3798 event_index = pqi_event_type_to_event_index(response->event_type);
3800 if (event_index >= 0 && response->request_acknowledge) {
3801 event = &ctrl_info->events[event_index];
3802 event->pending = true;
3803 event->event_type = response->event_type;
3804 event->event_id = get_unaligned_le16(&response->event_id);
3805 event->additional_event_id =
3806 get_unaligned_le32(&response->additional_event_id);
3807 if (event->event_type == PQI_EVENT_TYPE_OFA)
3808 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3809 }
3811 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3812 }
3814 if (num_events) {
3815 event_queue->oq_ci_copy = oq_ci;
3816 writel(oq_ci, event_queue->oq_ci);
3817 schedule_work(&ctrl_info->event_work);
3818 }
3820 return num_events;
3821 }
3823 #define PQI_LEGACY_INTX_MASK 0x1
3825 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3826 {
3827 u32 intx_mask;
3828 struct pqi_device_registers __iomem *pqi_registers;
3829 volatile void __iomem *register_addr;
3831 pqi_registers = ctrl_info->pqi_registers;
3833 if (enable_intx)
3834 register_addr = &pqi_registers->legacy_intx_mask_clear;
3835 else
3836 register_addr = &pqi_registers->legacy_intx_mask_set;
3838 intx_mask = readl(register_addr);
3839 intx_mask |= PQI_LEGACY_INTX_MASK;
3840 writel(intx_mask, register_addr);
3841 }
3843 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3844 enum pqi_irq_mode new_mode)
3845 {
3846 switch (ctrl_info->irq_mode) {
3847 case IRQ_MODE_MSIX:
3848 switch (new_mode) {
3849 case IRQ_MODE_MSIX:
3850 break;
3851 case IRQ_MODE_INTX:
3852 pqi_configure_legacy_intx(ctrl_info, true);
3853 sis_enable_intx(ctrl_info);
3854 break;
3855 case IRQ_MODE_NONE:
3856 break;
3857 }
3858 break;
3859 case IRQ_MODE_INTX:
3860 switch (new_mode) {
3861 case IRQ_MODE_MSIX:
3862 pqi_configure_legacy_intx(ctrl_info, false);
3863 sis_enable_msix(ctrl_info);
3864 break;
3865 case IRQ_MODE_INTX:
3866 break;
3867 case IRQ_MODE_NONE:
3868 pqi_configure_legacy_intx(ctrl_info, false);
3869 break;
3870 }
3871 break;
3872 case IRQ_MODE_NONE:
3873 switch (new_mode) {
3874 case IRQ_MODE_MSIX:
3875 sis_enable_msix(ctrl_info);
3876 break;
3877 case IRQ_MODE_INTX:
3878 pqi_configure_legacy_intx(ctrl_info, true);
3879 sis_enable_intx(ctrl_info);
3880 break;
3881 case IRQ_MODE_NONE:
3882 break;
3883 }
3884 break;
3885 }
3887 ctrl_info->irq_mode = new_mode;
3888 }
3890 #define PQI_LEGACY_INTX_PENDING 0x1
3892 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3893 {
3894 bool valid_irq;
3895 u32 intx_status;
3897 switch (ctrl_info->irq_mode) {
3898 case IRQ_MODE_MSIX:
3899 valid_irq = true;
3900 break;
3901 case IRQ_MODE_INTX:
3902 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3903 if (intx_status & PQI_LEGACY_INTX_PENDING)
3904 valid_irq = true;
3905 else
3906 valid_irq = false;
3907 break;
3908 case IRQ_MODE_NONE:
3909 default:
3910 valid_irq = false;
3911 break;
3912 }
3914 return valid_irq;
3915 }
3917 static irqreturn_t pqi_irq_handler(int irq, void *data)
3919 struct pqi_ctrl_info *ctrl_info;
3920 struct pqi_queue_group *queue_group;
3921 int num_io_responses_handled;
3922 int num_events_handled;
3924 queue_group = data;
3925 ctrl_info = queue_group->ctrl_info;
3927 if (!pqi_is_valid_irq(ctrl_info))
3928 return IRQ_NONE;
3930 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3931 if (num_io_responses_handled < 0)
3932 goto out;
3934 if (irq == ctrl_info->event_irq) {
3935 num_events_handled = pqi_process_event_intr(ctrl_info);
3936 if (num_events_handled < 0)
3937 goto out;
3938 } else {
3939 num_events_handled = 0;
3940 }
3942 if (num_io_responses_handled + num_events_handled > 0)
3943 atomic_inc(&ctrl_info->num_interrupts);
3945 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3946 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3948 out:
3949 return IRQ_HANDLED;
3950 }
3952 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3953 {
3954 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3955 int i;
3956 int rc;
3958 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3960 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3961 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3962 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3963 if (rc) {
3964 dev_err(&pci_dev->dev,
3965 "irq %u init failed with error %d\n",
3966 pci_irq_vector(pci_dev, i), rc);
3967 return rc;
3968 }
3969 ctrl_info->num_msix_vectors_initialized++;
3970 }
3972 return 0;
3973 }
3975 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3976 {
3977 int i;
3979 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3980 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3981 &ctrl_info->queue_groups[i]);
3983 ctrl_info->num_msix_vectors_initialized = 0;
3984 }
3986 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3988 int num_vectors_enabled;
3990 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3991 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3992 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3993 if (num_vectors_enabled < 0) {
3994 dev_err(&ctrl_info->pci_dev->dev,
3995 "MSI-X init failed with error %d\n",
3996 num_vectors_enabled);
3997 return num_vectors_enabled;
3998 }
4000 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4001 ctrl_info->irq_mode = IRQ_MODE_MSIX;
4003 return 0;
4004 }
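/*
 * PCI_IRQ_AFFINITY above asks the PCI core to spread the MSI-X vectors
 * across CPUs so each queue group's interrupt tends to land near its
 * submitters. pci_alloc_irq_vectors() may grant fewer than
 * num_queue_groups vectors; the count actually granted is recorded in
 * num_msix_vectors_enabled, which the rest of initialization is
 * expected to respect when sizing queue usage (an assumption here, not
 * shown in this excerpt).
 */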
4005 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4007 if (ctrl_info->num_msix_vectors_enabled) {
4008 pci_free_irq_vectors(ctrl_info->pci_dev);
4009 ctrl_info->num_msix_vectors_enabled = 0;
4010 }
4011 }
4013 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4014 {
4015 unsigned int i;
4016 size_t alloc_length;
4017 size_t element_array_length_per_iq;
4018 size_t element_array_length_per_oq;
4019 void *element_array;
4020 void __iomem *next_queue_index;
4021 void *aligned_pointer;
4022 unsigned int num_inbound_queues;
4023 unsigned int num_outbound_queues;
4024 unsigned int num_queue_indexes;
4025 struct pqi_queue_group *queue_group;
4027 element_array_length_per_iq =
4028 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4029 ctrl_info->num_elements_per_iq;
4030 element_array_length_per_oq =
4031 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4032 ctrl_info->num_elements_per_oq;
4033 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4034 num_outbound_queues = ctrl_info->num_queue_groups;
4035 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4037 aligned_pointer = NULL;
4039 for (i = 0; i < num_inbound_queues; i++) {
4040 aligned_pointer = PTR_ALIGN(aligned_pointer,
4041 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4042 aligned_pointer += element_array_length_per_iq;
4045 for (i = 0; i < num_outbound_queues; i++) {
4046 aligned_pointer = PTR_ALIGN(aligned_pointer,
4047 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4048 aligned_pointer += element_array_length_per_oq;
4051 aligned_pointer = PTR_ALIGN(aligned_pointer,
4052 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4053 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4054 PQI_EVENT_OQ_ELEMENT_LENGTH;
4056 for (i = 0; i < num_queue_indexes; i++) {
4057 aligned_pointer = PTR_ALIGN(aligned_pointer,
4058 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4059 aligned_pointer += sizeof(pqi_index_t);
4062 alloc_length = (size_t)aligned_pointer +
4063 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4065 alloc_length += PQI_EXTRA_SGL_MEMORY;
4067 ctrl_info->queue_memory_base =
4068 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4069 &ctrl_info->queue_memory_base_dma_handle,
4070 GFP_KERNEL);
4072 if (!ctrl_info->queue_memory_base)
4073 return -ENOMEM;
4075 ctrl_info->queue_memory_length = alloc_length;
4077 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4078 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4080 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4081 queue_group = &ctrl_info->queue_groups[i];
4082 queue_group->iq_element_array[RAID_PATH] = element_array;
4083 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4084 ctrl_info->queue_memory_base_dma_handle +
4085 (element_array - ctrl_info->queue_memory_base);
4086 element_array += element_array_length_per_iq;
4087 element_array = PTR_ALIGN(element_array,
4088 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4089 queue_group->iq_element_array[AIO_PATH] = element_array;
4090 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4091 ctrl_info->queue_memory_base_dma_handle +
4092 (element_array - ctrl_info->queue_memory_base);
4093 element_array += element_array_length_per_iq;
4094 element_array = PTR_ALIGN(element_array,
4095 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4098 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4099 queue_group = &ctrl_info->queue_groups[i];
4100 queue_group->oq_element_array = element_array;
4101 queue_group->oq_element_array_bus_addr =
4102 ctrl_info->queue_memory_base_dma_handle +
4103 (element_array - ctrl_info->queue_memory_base);
4104 element_array += element_array_length_per_oq;
4105 element_array = PTR_ALIGN(element_array,
4106 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4109 ctrl_info->event_queue.oq_element_array = element_array;
4110 ctrl_info->event_queue.oq_element_array_bus_addr =
4111 ctrl_info->queue_memory_base_dma_handle +
4112 (element_array - ctrl_info->queue_memory_base);
4113 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4114 PQI_EVENT_OQ_ELEMENT_LENGTH;
4116 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4117 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4119 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4120 queue_group = &ctrl_info->queue_groups[i];
4121 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4122 queue_group->iq_ci_bus_addr[RAID_PATH] =
4123 ctrl_info->queue_memory_base_dma_handle +
4124 ((void __iomem *)queue_group->iq_ci[RAID_PATH] -
4125 (void __iomem *)ctrl_info->queue_memory_base);
4126 next_queue_index += sizeof(pqi_index_t);
4127 next_queue_index = PTR_ALIGN(next_queue_index,
4128 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4129 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4130 queue_group->iq_ci_bus_addr[AIO_PATH] =
4131 ctrl_info->queue_memory_base_dma_handle +
4132 ((void __iomem *)queue_group->iq_ci[AIO_PATH] -
4133 (void __iomem *)ctrl_info->queue_memory_base);
4134 next_queue_index += sizeof(pqi_index_t);
4135 next_queue_index = PTR_ALIGN(next_queue_index,
4136 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4137 queue_group->oq_pi = next_queue_index;
4138 queue_group->oq_pi_bus_addr =
4139 ctrl_info->queue_memory_base_dma_handle +
4140 ((void __iomem *)queue_group->oq_pi -
4141 (void __iomem *)ctrl_info->queue_memory_base);
4142 next_queue_index += sizeof(pqi_index_t);
4143 next_queue_index = PTR_ALIGN(next_queue_index,
4144 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4147 ctrl_info->event_queue.oq_pi = next_queue_index;
4148 ctrl_info->event_queue.oq_pi_bus_addr =
4149 ctrl_info->queue_memory_base_dma_handle +
4150 ((void __iomem *)ctrl_info->event_queue.oq_pi -
4151 (void __iomem *)ctrl_info->queue_memory_base);
4153 return 0;
4154 }
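/*
 * Resulting layout of the single coherent allocation built above, in
 * order: per-group RAID and AIO inbound-queue element arrays, per-group
 * outbound-queue element arrays, the event-queue element array, then
 * the iq_ci/oq_pi index slots aligned to PQI_OPERATIONAL_INDEX_ALIGNMENT.
 * PQI_EXTRA_SGL_MEMORY pads the buffer so trailing IQ elements can be
 * written with additional SG descriptors without overrunning it.
 */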
4156 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4157 {
4158 unsigned int i;
4159 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4160 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4162 /*
4163 * Initialize the backpointers to the controller structure in
4164 * each operational queue group structure.
4165 */
4166 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4167 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4169 /*
4170 * Assign IDs to all operational queues. Note that the IDs
4171 * assigned to operational IQs are independent of the IDs
4172 * assigned to operational OQs.
4173 */
4174 ctrl_info->event_queue.oq_id = next_oq_id++;
4175 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4176 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4177 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4178 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4179 }
4181 /*
4182 * Assign MSI-X table entry indexes to all queues. Note that the
4183 * interrupt for the event queue is shared with the first queue group.
4184 */
4185 ctrl_info->event_queue.int_msg_num = 0;
4186 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4187 ctrl_info->queue_groups[i].int_msg_num = i;
4189 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4190 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4191 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4192 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4193 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4194 }
4195 }
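/*
 * Example ID assignment for a 3-group controller (from the loops
 * above, assuming PQI_MIN_OPERATIONAL_QUEUE_ID is 1): the event queue
 * gets OQ ID 1; groups 0-2 get IQ IDs 1/2, 3/4, 5/6 (RAID/AIO paths)
 * and OQ IDs 2, 3, 4. MSI-X entry 0 is shared by the event queue and
 * queue group 0.
 */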
4197 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4199 size_t alloc_length;
4200 struct pqi_admin_queues_aligned *admin_queues_aligned;
4201 struct pqi_admin_queues *admin_queues;
4203 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4204 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4206 ctrl_info->admin_queue_memory_base =
4207 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4208 &ctrl_info->admin_queue_memory_base_dma_handle,
4209 GFP_KERNEL);
4211 if (!ctrl_info->admin_queue_memory_base)
4212 return -ENOMEM;
4214 ctrl_info->admin_queue_memory_length = alloc_length;
4216 admin_queues = &ctrl_info->admin_queues;
4217 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4218 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4219 admin_queues->iq_element_array =
4220 &admin_queues_aligned->iq_element_array;
4221 admin_queues->oq_element_array =
4222 &admin_queues_aligned->oq_element_array;
4223 admin_queues->iq_ci =
4224 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4225 admin_queues->oq_pi =
4226 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4228 admin_queues->iq_element_array_bus_addr =
4229 ctrl_info->admin_queue_memory_base_dma_handle +
4230 (admin_queues->iq_element_array -
4231 ctrl_info->admin_queue_memory_base);
4232 admin_queues->oq_element_array_bus_addr =
4233 ctrl_info->admin_queue_memory_base_dma_handle +
4234 (admin_queues->oq_element_array -
4235 ctrl_info->admin_queue_memory_base);
4236 admin_queues->iq_ci_bus_addr =
4237 ctrl_info->admin_queue_memory_base_dma_handle +
4238 ((void __iomem *)admin_queues->iq_ci -
4239 (void __iomem *)ctrl_info->admin_queue_memory_base);
4240 admin_queues->oq_pi_bus_addr =
4241 ctrl_info->admin_queue_memory_base_dma_handle +
4242 ((void __iomem *)admin_queues->oq_pi -
4243 (void __iomem *)ctrl_info->admin_queue_memory_base);
4245 return 0;
4248 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
4249 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
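/*
 * Creating the admin queue pair is a pure register handshake: program the
 * element array and index addresses, write the "create admin queue pair"
 * function code, then poll function_and_status_code until the controller
 * reports idle status or the timeout above expires.
 */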
4251 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4253 struct pqi_device_registers __iomem *pqi_registers;
4254 struct pqi_admin_queues *admin_queues;
4255 unsigned long timeout;
4256 u8 status;
4257 u32 reg;
4259 pqi_registers = ctrl_info->pqi_registers;
4260 admin_queues = &ctrl_info->admin_queues;
4262 writeq((u64)admin_queues->iq_element_array_bus_addr,
4263 &pqi_registers->admin_iq_element_array_addr);
4264 writeq((u64)admin_queues->oq_element_array_bus_addr,
4265 &pqi_registers->admin_oq_element_array_addr);
4266 writeq((u64)admin_queues->iq_ci_bus_addr,
4267 &pqi_registers->admin_iq_ci_addr);
4268 writeq((u64)admin_queues->oq_pi_bus_addr,
4269 &pqi_registers->admin_oq_pi_addr);
4271 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4272 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4273 (admin_queues->int_msg_num << 16);
4274 writel(reg, &pqi_registers->admin_iq_num_elements);
4276 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4277 &pqi_registers->function_and_status_code);
4279 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4280 while (1) {
4281 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4282 status = readb(&pqi_registers->function_and_status_code);
4283 if (status == PQI_STATUS_IDLE)
4284 break;
4285 if (time_after(jiffies, timeout))
4286 return -ETIMEDOUT;
4287 }
4290 * The offset registers are not initialized to the correct
4291 * offsets until *after* the create admin queue pair command
4292 * completes successfully.
4294 admin_queues->iq_pi = ctrl_info->iomem_base +
4295 PQI_DEVICE_REGISTERS_OFFSET +
4296 readq(&pqi_registers->admin_iq_pi_offset);
4297 admin_queues->oq_ci = ctrl_info->iomem_base +
4298 PQI_DEVICE_REGISTERS_OFFSET +
4299 readq(&pqi_registers->admin_oq_ci_offset);
4301 return 0;
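/*
 * Admin submission below is a classic single-producer ring: copy the IU
 * into the next free element, advance the local PI copy modulo the
 * element count, and write the new PI to the controller register as the
 * doorbell.
 */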
4304 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4305 struct pqi_general_admin_request *request)
4307 struct pqi_admin_queues *admin_queues;
4308 void *next_element;
4309 pqi_index_t iq_pi;
4311 admin_queues = &ctrl_info->admin_queues;
4312 iq_pi = admin_queues->iq_pi_copy;
4314 next_element = admin_queues->iq_element_array +
4315 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4317 memcpy(next_element, request, sizeof(*request));
4319 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4320 admin_queues->iq_pi_copy = iq_pi;
4323 * This write notifies the controller that an IU is available to be
4324 * processed.
4326 writel(iq_pi, admin_queues->iq_pi);
4329 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4331 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4332 struct pqi_general_admin_response *response)
4334 struct pqi_admin_queues *admin_queues;
4335 pqi_index_t oq_pi;
4336 pqi_index_t oq_ci;
4337 unsigned long timeout;
4339 admin_queues = &ctrl_info->admin_queues;
4340 oq_ci = admin_queues->oq_ci_copy;
4342 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
4344 while (1) {
4345 oq_pi = readl(admin_queues->oq_pi);
4346 if (oq_pi != oq_ci)
4347 break;
4348 if (time_after(jiffies, timeout)) {
4349 dev_err(&ctrl_info->pci_dev->dev,
4350 "timed out waiting for admin response\n");
4351 return -ETIMEDOUT;
4352 }
4353 if (!sis_is_firmware_running(ctrl_info))
4354 return -ENXIO;
4355 usleep_range(1000, 2000);
4356 }
4358 memcpy(response, admin_queues->oq_element_array +
4359 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4361 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4362 admin_queues->oq_ci_copy = oq_ci;
4363 writel(oq_ci, admin_queues->oq_ci);
4365 return 0;
4368 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4369 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4370 struct pqi_io_request *io_request)
4372 struct pqi_io_request *next;
4373 void *next_element;
4374 pqi_index_t iq_pi;
4375 pqi_index_t iq_ci;
4376 size_t iu_length;
4377 unsigned long flags;
4378 unsigned int num_elements_needed;
4379 unsigned int num_elements_to_end_of_queue;
4380 size_t copy_count;
4381 struct pqi_iu_header *request;
4383 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4385 if (io_request) {
4386 io_request->queue_group = queue_group;
4387 list_add_tail(&io_request->request_list_entry,
4388 &queue_group->request_list[path]);
4389 }
4391 iq_pi = queue_group->iq_pi_copy[path];
4393 list_for_each_entry_safe(io_request, next,
4394 &queue_group->request_list[path], request_list_entry) {
4396 request = io_request->iu;
4398 iu_length = get_unaligned_le16(&request->iu_length) +
4399 PQI_REQUEST_HEADER_LENGTH;
4400 num_elements_needed =
4401 DIV_ROUND_UP(iu_length,
4402 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4404 iq_ci = readl(queue_group->iq_ci[path]);
4406 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4407 ctrl_info->num_elements_per_iq))
4408 break;
4410 put_unaligned_le16(queue_group->oq_id,
4411 &request->response_queue_id);
4413 next_element = queue_group->iq_element_array[path] +
4414 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4416 num_elements_to_end_of_queue =
4417 ctrl_info->num_elements_per_iq - iq_pi;
4419 if (num_elements_needed <= num_elements_to_end_of_queue) {
4420 memcpy(next_element, request, iu_length);
4421 } else {
4422 copy_count = num_elements_to_end_of_queue *
4423 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4424 memcpy(next_element, request, copy_count);
4425 memcpy(queue_group->iq_element_array[path],
4426 (u8 *)request + copy_count,
4427 iu_length - copy_count);
4428 }
4430 iq_pi = (iq_pi + num_elements_needed) %
4431 ctrl_info->num_elements_per_iq;
4433 list_del(&io_request->request_list_entry);
4434 }
4436 if (iq_pi != queue_group->iq_pi_copy[path]) {
4437 queue_group->iq_pi_copy[path] = iq_pi;
4439 * This write notifies the controller that one or more IUs are
4440 * available to be processed.
4442 writel(iq_pi, queue_group->iq_pi[path]);
4445 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
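/*
 * Note that pqi_start_io() tolerates a NULL io_request: the guard above
 * means such a call simply retries draining whatever is already queued on
 * the submission list. IUs that span elements and wrap past the end of
 * the element array are copied in two pieces.
 */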
4448 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4450 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4451 struct completion *wait)
4453 int rc;
4455 while (1) {
4456 if (wait_for_completion_io_timeout(wait,
4457 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
4458 rc = 0;
4459 break;
4460 }
4462 pqi_check_ctrl_health(ctrl_info);
4463 if (pqi_ctrl_offline(ctrl_info)) {
4464 rc = -ENXIO;
4465 break;
4466 }
4467 }
4469 return rc;
4472 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4473 void *context)
4475 struct completion *waiting = context;
4477 complete(waiting);
4480 static int pqi_process_raid_io_error_synchronous(
4481 struct pqi_raid_error_info *error_info)
4483 int rc = -EIO;
4485 switch (error_info->data_out_result) {
4486 case PQI_DATA_IN_OUT_GOOD:
4487 if (error_info->status == SAM_STAT_GOOD)
4488 rc = 0;
4489 break;
4490 case PQI_DATA_IN_OUT_UNDERFLOW:
4491 if (error_info->status == SAM_STAT_GOOD ||
4492 error_info->status == SAM_STAT_CHECK_CONDITION)
4493 rc = 0;
4494 break;
4495 case PQI_DATA_IN_OUT_ABORTED:
4496 rc = PQI_CMD_STATUS_ABORTED;
4497 break;
4498 }
4500 return rc;
4503 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4505 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4508 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4509 struct pqi_iu_header *request, unsigned int flags,
4510 struct pqi_raid_error_info *error_info)
4512 int rc = 0;
4513 struct pqi_io_request *io_request;
4514 size_t iu_length;
4515 DECLARE_COMPLETION_ONSTACK(wait);
4517 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4518 if (down_interruptible(&ctrl_info->sync_request_sem))
4519 return -ERESTARTSYS;
4520 } else {
4521 down(&ctrl_info->sync_request_sem);
4522 }
4524 pqi_ctrl_busy(ctrl_info);
4526 * Wait for other admin queue updates such as:
4527 * config table changes, OFA memory updates, ...
4529 if (pqi_is_blockable_request(request))
4530 pqi_wait_if_ctrl_blocked(ctrl_info);
4532 if (pqi_ctrl_offline(ctrl_info)) {
4533 rc = -ENXIO;
4534 goto out;
4535 }
4537 io_request = pqi_alloc_io_request(ctrl_info);
4539 put_unaligned_le16(io_request->index,
4540 &(((struct pqi_raid_path_request *)request)->request_id));
4542 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4543 ((struct pqi_raid_path_request *)request)->error_index =
4544 ((struct pqi_raid_path_request *)request)->request_id;
4546 iu_length = get_unaligned_le16(&request->iu_length) +
4547 PQI_REQUEST_HEADER_LENGTH;
4548 memcpy(io_request->iu, request, iu_length);
4550 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4551 io_request->context = &wait;
4553 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4554 io_request);
4556 pqi_wait_for_completion_io(ctrl_info, &wait);
4558 if (error_info) {
4559 if (io_request->error_info)
4560 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4561 else
4562 memset(error_info, 0, sizeof(*error_info));
4563 } else if (rc == 0 && io_request->error_info) {
4564 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4567 pqi_free_io_request(io_request);
4569 out:
4570 pqi_ctrl_unbusy(ctrl_info);
4571 up(&ctrl_info->sync_request_sem);
4573 return rc;
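/*
 * All synchronous RAID-path requests are serialized through
 * sync_request_sem and issued on the default queue group, so only one
 * such request is in flight at a time and its completion is signaled
 * through the on-stack completion.
 */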
4576 static int pqi_validate_admin_response(
4577 struct pqi_general_admin_response *response, u8 expected_function_code)
4579 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4580 return -EINVAL;
4582 if (get_unaligned_le16(&response->header.iu_length) !=
4583 PQI_GENERAL_ADMIN_IU_LENGTH)
4584 return -EINVAL;
4586 if (response->function_code != expected_function_code)
4587 return -EINVAL;
4589 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4590 return -EINVAL;
4592 return 0;
4595 static int pqi_submit_admin_request_synchronous(
4596 struct pqi_ctrl_info *ctrl_info,
4597 struct pqi_general_admin_request *request,
4598 struct pqi_general_admin_response *response)
4600 int rc;
4602 pqi_submit_admin_request(ctrl_info, request);
4604 rc = pqi_poll_for_admin_response(ctrl_info, response);
4606 if (rc == 0)
4607 rc = pqi_validate_admin_response(response, request->function_code);
4609 return rc;
4612 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4614 int rc;
4615 struct pqi_general_admin_request request;
4616 struct pqi_general_admin_response response;
4617 struct pqi_device_capability *capability;
4618 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4620 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4621 if (!capability)
4622 return -ENOMEM;
4624 memset(&request, 0, sizeof(request));
4626 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4627 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4628 &request.header.iu_length);
4629 request.function_code =
4630 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4631 put_unaligned_le32(sizeof(*capability),
4632 &request.data.report_device_capability.buffer_length);
4634 rc = pqi_map_single(ctrl_info->pci_dev,
4635 &request.data.report_device_capability.sg_descriptor,
4636 capability, sizeof(*capability),
4637 DMA_FROM_DEVICE);
4638 if (rc)
4639 goto out;
4641 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4643 pqi_pci_unmap(ctrl_info->pci_dev,
4644 &request.data.report_device_capability.sg_descriptor, 1,
4645 DMA_FROM_DEVICE);
4647 if (rc)
4648 goto out;
4650 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4651 rc = -EIO;
4652 goto out;
4653 }
4655 ctrl_info->max_inbound_queues =
4656 get_unaligned_le16(&capability->max_inbound_queues);
4657 ctrl_info->max_elements_per_iq =
4658 get_unaligned_le16(&capability->max_elements_per_iq);
4659 ctrl_info->max_iq_element_length =
4660 get_unaligned_le16(&capability->max_iq_element_length)
4661 * 16;
4662 ctrl_info->max_outbound_queues =
4663 get_unaligned_le16(&capability->max_outbound_queues);
4664 ctrl_info->max_elements_per_oq =
4665 get_unaligned_le16(&capability->max_elements_per_oq);
4666 ctrl_info->max_oq_element_length =
4667 get_unaligned_le16(&capability->max_oq_element_length)
4668 * 16;
4670 sop_iu_layer_descriptor =
4671 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4673 ctrl_info->max_inbound_iu_length_per_firmware =
4674 get_unaligned_le16(
4675 &sop_iu_layer_descriptor->max_inbound_iu_length);
4676 ctrl_info->inbound_spanning_supported =
4677 sop_iu_layer_descriptor->inbound_spanning_supported;
4678 ctrl_info->outbound_spanning_supported =
4679 sop_iu_layer_descriptor->outbound_spanning_supported;
4681 out:
4682 kfree(capability);
4684 return rc;
4687 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4689 if (ctrl_info->max_iq_element_length <
4690 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4691 dev_err(&ctrl_info->pci_dev->dev,
4692 "max. inbound queue element length of %d is less than the required length of %d\n",
4693 ctrl_info->max_iq_element_length,
4694 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4695 return -EINVAL;
4696 }
4698 if (ctrl_info->max_oq_element_length <
4699 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4700 dev_err(&ctrl_info->pci_dev->dev,
4701 "max. outbound queue element length of %d is less than the required length of %d\n",
4702 ctrl_info->max_oq_element_length,
4703 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4704 return -EINVAL;
4705 }
4707 if (ctrl_info->max_inbound_iu_length_per_firmware <
4708 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4709 dev_err(&ctrl_info->pci_dev->dev,
4710 "max. inbound IU length of %u is less than the min. required length of %d\n",
4711 ctrl_info->max_inbound_iu_length_per_firmware,
4712 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4713 return -EINVAL;
4714 }
4716 if (!ctrl_info->inbound_spanning_supported) {
4717 dev_err(&ctrl_info->pci_dev->dev,
4718 "the controller does not support inbound spanning\n");
4722 if (ctrl_info->outbound_spanning_supported) {
4723 dev_err(&ctrl_info->pci_dev->dev,
4724 "the controller supports outbound spanning but this driver does not\n");
4731 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4733 int rc;
4734 struct pqi_event_queue *event_queue;
4735 struct pqi_general_admin_request request;
4736 struct pqi_general_admin_response response;
4738 event_queue = &ctrl_info->event_queue;
4741 * Create OQ (Outbound Queue - device to host queue) to dedicate
4742 * to events.
4744 memset(&request, 0, sizeof(request));
4745 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4746 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4747 &request.header.iu_length);
4748 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4749 put_unaligned_le16(event_queue->oq_id,
4750 &request.data.create_operational_oq.queue_id);
4751 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4752 &request.data.create_operational_oq.element_array_addr);
4753 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4754 &request.data.create_operational_oq.pi_addr);
4755 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4756 &request.data.create_operational_oq.num_elements);
4757 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4758 &request.data.create_operational_oq.element_length);
4759 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4760 put_unaligned_le16(event_queue->int_msg_num,
4761 &request.data.create_operational_oq.int_msg_num);
4763 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4764 &response);
4765 if (rc)
4766 return rc;
4768 event_queue->oq_ci = ctrl_info->iomem_base +
4769 PQI_DEVICE_REGISTERS_OFFSET +
4770 get_unaligned_le64(
4771 &response.data.create_operational_oq.oq_ci_offset);
4773 return 0;
4776 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4777 unsigned int group_number)
4779 int rc;
4780 struct pqi_queue_group *queue_group;
4781 struct pqi_general_admin_request request;
4782 struct pqi_general_admin_response response;
4784 queue_group = &ctrl_info->queue_groups[group_number];
4787 * Create IQ (Inbound Queue - host to device queue) for
4788 * RAID path.
4790 memset(&request, 0, sizeof(request));
4791 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4792 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4793 &request.header.iu_length);
4794 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4795 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4796 &request.data.create_operational_iq.queue_id);
4797 put_unaligned_le64(
4798 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4799 &request.data.create_operational_iq.element_array_addr);
4800 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4801 &request.data.create_operational_iq.ci_addr);
4802 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4803 &request.data.create_operational_iq.num_elements);
4804 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4805 &request.data.create_operational_iq.element_length);
4806 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4808 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4809 &response);
4810 if (rc) {
4811 dev_err(&ctrl_info->pci_dev->dev,
4812 "error creating inbound RAID queue\n");
4813 return rc;
4814 }
4816 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4817 PQI_DEVICE_REGISTERS_OFFSET +
4818 get_unaligned_le64(
4819 &response.data.create_operational_iq.iq_pi_offset);
4822 * Create IQ (Inbound Queue - host to device queue) for
4823 * Advanced I/O (AIO) path.
4825 memset(&request, 0, sizeof(request));
4826 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4827 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4828 &request.header.iu_length);
4829 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4830 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4831 &request.data.create_operational_iq.queue_id);
4832 put_unaligned_le64((u64)queue_group->
4833 iq_element_array_bus_addr[AIO_PATH],
4834 &request.data.create_operational_iq.element_array_addr);
4835 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4836 &request.data.create_operational_iq.ci_addr);
4837 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4838 &request.data.create_operational_iq.num_elements);
4839 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4840 &request.data.create_operational_iq.element_length);
4841 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4843 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4844 &response);
4845 if (rc) {
4846 dev_err(&ctrl_info->pci_dev->dev,
4847 "error creating inbound AIO queue\n");
4848 return rc;
4849 }
4851 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4852 PQI_DEVICE_REGISTERS_OFFSET +
4853 get_unaligned_le64(
4854 &response.data.create_operational_iq.iq_pi_offset);
4857 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4858 * assumed to be for RAID path I/O unless we change the queue's
4859 * property.
4861 memset(&request, 0, sizeof(request));
4862 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4863 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4864 &request.header.iu_length);
4865 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4866 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4867 &request.data.change_operational_iq_properties.queue_id);
4868 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4869 &request.data.change_operational_iq_properties.vendor_specific);
4871 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4872 &response);
4873 if (rc) {
4874 dev_err(&ctrl_info->pci_dev->dev,
4875 "error changing queue property\n");
4876 return rc;
4877 }
4880 * Create OQ (Outbound Queue - device to host queue).
4882 memset(&request, 0, sizeof(request));
4883 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4884 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4885 &request.header.iu_length);
4886 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4887 put_unaligned_le16(queue_group->oq_id,
4888 &request.data.create_operational_oq.queue_id);
4889 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4890 &request.data.create_operational_oq.element_array_addr);
4891 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4892 &request.data.create_operational_oq.pi_addr);
4893 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4894 &request.data.create_operational_oq.num_elements);
4895 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4896 &request.data.create_operational_oq.element_length);
4897 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4898 put_unaligned_le16(queue_group->int_msg_num,
4899 &request.data.create_operational_oq.int_msg_num);
4901 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4902 &response);
4903 if (rc) {
4904 dev_err(&ctrl_info->pci_dev->dev,
4905 "error creating outbound queue\n");
4906 return rc;
4907 }
4909 queue_group->oq_ci = ctrl_info->iomem_base +
4910 PQI_DEVICE_REGISTERS_OFFSET +
4911 get_unaligned_le64(
4912 &response.data.create_operational_oq.oq_ci_offset);
4914 return 0;
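/*
 * Bringing up one queue group therefore costs four admin commands: create
 * the RAID-path IQ, create the AIO-path IQ, flip the AIO IQ's
 * vendor-specific property, and create the shared OQ that both inbound
 * queues complete into.
 */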
4917 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4919 int rc;
4920 unsigned int i;
4922 rc = pqi_create_event_queue(ctrl_info);
4923 if (rc) {
4924 dev_err(&ctrl_info->pci_dev->dev,
4925 "error creating event queue\n");
4926 return rc;
4927 }
4929 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4930 rc = pqi_create_queue_group(ctrl_info, i);
4931 if (rc) {
4932 dev_err(&ctrl_info->pci_dev->dev,
4933 "error creating queue group number %u/%u\n",
4934 i, ctrl_info->num_queue_groups);
4935 return rc;
4936 }
4937 }
4939 return 0;
4942 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4943 struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
4945 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4946 bool enable_events)
4948 int rc;
4949 unsigned int i;
4950 struct pqi_event_config *event_config;
4951 struct pqi_event_descriptor *event_descriptor;
4952 struct pqi_general_management_request request;
4954 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4955 GFP_KERNEL);
4956 if (!event_config)
4957 return -ENOMEM;
4959 memset(&request, 0, sizeof(request));
4961 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4962 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4963 data.report_event_configuration.sg_descriptors[1]) -
4964 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4965 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4966 &request.data.report_event_configuration.buffer_length);
4968 rc = pqi_map_single(ctrl_info->pci_dev,
4969 request.data.report_event_configuration.sg_descriptors,
4970 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4971 DMA_FROM_DEVICE);
4972 if (rc)
4973 goto out;
4975 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
4977 pqi_pci_unmap(ctrl_info->pci_dev,
4978 request.data.report_event_configuration.sg_descriptors, 1,
4979 DMA_FROM_DEVICE);
4981 if (rc)
4982 goto out;
4984 for (i = 0; i < event_config->num_event_descriptors; i++) {
4985 event_descriptor = &event_config->descriptors[i];
4986 if (enable_events &&
4987 pqi_is_supported_event(event_descriptor->event_type))
4988 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4989 &event_descriptor->oq_id);
4990 else
4991 put_unaligned_le16(0, &event_descriptor->oq_id);
4992 }
4994 memset(&request, 0, sizeof(request));
4996 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4997 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4998 data.report_event_configuration.sg_descriptors[1]) -
4999 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5000 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5001 &request.data.report_event_configuration.buffer_length);
5003 rc = pqi_map_single(ctrl_info->pci_dev,
5004 request.data.report_event_configuration.sg_descriptors,
5005 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5006 DMA_TO_DEVICE);
5007 if (rc)
5008 goto out;
5010 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5012 pqi_pci_unmap(ctrl_info->pci_dev,
5013 request.data.report_event_configuration.sg_descriptors, 1,
5014 DMA_TO_DEVICE);
5016 out:
5017 kfree(event_config);
5019 return rc;
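/*
 * Event configuration is a read-modify-write cycle: fetch the current
 * descriptor table, point each supported event type at the dedicated
 * event OQ (or clear the OQ ID for the rest), and write the table back
 * with the SET_VENDOR_EVENT_CONFIG IU.
 */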
5022 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5024 return pqi_configure_events(ctrl_info, true);
5027 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5029 unsigned int i;
5030 struct device *dev;
5031 size_t sg_chain_buffer_length;
5032 struct pqi_io_request *io_request;
5034 if (!ctrl_info->io_request_pool)
5035 return;
5037 dev = &ctrl_info->pci_dev->dev;
5038 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5039 io_request = ctrl_info->io_request_pool;
5041 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5042 kfree(io_request->iu);
5043 if (!io_request->sg_chain_buffer)
5044 break;
5045 dma_free_coherent(dev, sg_chain_buffer_length,
5046 io_request->sg_chain_buffer,
5047 io_request->sg_chain_buffer_dma_handle);
5048 io_request++;
5049 }
5051 kfree(ctrl_info->io_request_pool);
5052 ctrl_info->io_request_pool = NULL;
5055 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5057 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5058 ctrl_info->error_buffer_length,
5059 &ctrl_info->error_buffer_dma_handle,
5060 GFP_KERNEL);
5061 if (!ctrl_info->error_buffer)
5062 return -ENOMEM;
5064 return 0;
5067 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5069 unsigned int i;
5070 void *sg_chain_buffer;
5071 size_t sg_chain_buffer_length;
5072 dma_addr_t sg_chain_buffer_dma_handle;
5073 struct device *dev;
5074 struct pqi_io_request *io_request;
5076 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5077 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5079 if (!ctrl_info->io_request_pool) {
5080 dev_err(&ctrl_info->pci_dev->dev,
5081 "failed to allocate I/O request pool\n");
5085 dev = &ctrl_info->pci_dev->dev;
5086 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5087 io_request = ctrl_info->io_request_pool;
5089 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5090 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5092 if (!io_request->iu) {
5093 dev_err(&ctrl_info->pci_dev->dev,
5094 "failed to allocate IU buffers\n");
5098 sg_chain_buffer = dma_alloc_coherent(dev,
5099 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5100 GFP_KERNEL);
5102 if (!sg_chain_buffer) {
5103 dev_err(&ctrl_info->pci_dev->dev,
5104 "failed to allocate PQI scatter-gather chain buffers\n");
5108 io_request->index = i;
5109 io_request->sg_chain_buffer = sg_chain_buffer;
5110 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5111 io_request++;
5112 }
5114 return 0;
5116 error:
5117 pqi_free_all_io_requests(ctrl_info);
5119 return -ENOMEM;
5123 * Calculate required resources that are sized based on max. outstanding
5124 * requests and max. transfer size.
5127 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5129 u32 max_transfer_size;
5130 u32 max_sg_entries;
5132 ctrl_info->scsi_ml_can_queue =
5133 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5134 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5136 ctrl_info->error_buffer_length =
5137 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5139 if (reset_devices)
5140 max_transfer_size = min(ctrl_info->max_transfer_size,
5141 PQI_MAX_TRANSFER_SIZE_KDUMP);
5142 else
5143 max_transfer_size = min(ctrl_info->max_transfer_size,
5144 PQI_MAX_TRANSFER_SIZE);
5146 max_sg_entries = max_transfer_size / PAGE_SIZE;
5148 /* +1 to cover when the buffer is not page-aligned. */
5149 max_sg_entries++;
5151 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5153 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5155 ctrl_info->sg_chain_buffer_length =
5156 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5157 PQI_EXTRA_SGL_MEMORY;
5158 ctrl_info->sg_tablesize = max_sg_entries;
5159 ctrl_info->max_sectors = max_transfer_size / 512;
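/*
 * Worked example with illustrative values (not from any real controller):
 * PAGE_SIZE = 4 KiB, max_transfer_size = 1 MiB, firmware sg cap = 512.
 * max_sg_entries = 1 MiB / 4 KiB + 1 = 257, left at 257 after the min(),
 * max_transfer_size is re-derived as (257 - 1) * 4 KiB = 1 MiB, and
 * max_sectors = 1 MiB / 512 = 2048.
 */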
5162 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5164 int num_queue_groups;
5165 u16 num_elements_per_iq;
5166 u16 num_elements_per_oq;
5168 if (reset_devices) {
5169 num_queue_groups = 1;
5170 } else {
5171 int num_cpus;
5172 int max_queue_groups;
5174 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5175 ctrl_info->max_outbound_queues - 1);
5176 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5178 num_cpus = num_online_cpus();
5179 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5180 num_queue_groups = min(num_queue_groups, max_queue_groups);
5181 }
5183 ctrl_info->num_queue_groups = num_queue_groups;
5184 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
5187 * Make sure that the max. inbound IU length is an even multiple
5188 * of our inbound element length.
5190 ctrl_info->max_inbound_iu_length =
5191 (ctrl_info->max_inbound_iu_length_per_firmware /
5192 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5193 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5195 num_elements_per_iq =
5196 (ctrl_info->max_inbound_iu_length /
5197 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5199 /* Add one because one element in each queue is unusable. */
5200 num_elements_per_iq++;
5202 num_elements_per_iq = min(num_elements_per_iq,
5203 ctrl_info->max_elements_per_iq);
5205 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5206 num_elements_per_oq = min(num_elements_per_oq,
5207 ctrl_info->max_elements_per_oq);
5209 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5210 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5212 ctrl_info->max_sg_per_iu =
5213 ((ctrl_info->max_inbound_iu_length -
5214 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5215 sizeof(struct pqi_sg_descriptor)) +
5216 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5218 ctrl_info->max_sg_per_r56_iu =
5219 ((ctrl_info->max_inbound_iu_length -
5220 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5221 sizeof(struct pqi_sg_descriptor)) +
5222 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
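/*
 * Example with assumed numbers: if the firmware reported a max inbound IU
 * length of 1060 bytes and IQ elements are 128 bytes, the IU length
 * rounds down to 1024, num_elements_per_iq becomes 1024 / 128 + 1 = 9
 * (before the min() cap), and num_elements_per_oq = (9 - 1) * 2 + 1 = 17.
 */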
5225 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5226 struct scatterlist *sg)
5228 u64 address = (u64)sg_dma_address(sg);
5229 unsigned int length = sg_dma_len(sg);
5231 put_unaligned_le64(address, &sg_descriptor->address);
5232 put_unaligned_le32(length, &sg_descriptor->length);
5233 put_unaligned_le32(0, &sg_descriptor->flags);
5236 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5237 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5238 int max_sg_per_iu, bool *chained)
5240 int i;
5241 unsigned int num_sg_in_iu;
5243 *chained = false;
5244 i = 0;
5245 num_sg_in_iu = 0;
5246 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
5248 while (1) {
5249 pqi_set_sg_descriptor(sg_descriptor, sg);
5250 if (!*chained)
5251 num_sg_in_iu++;
5252 i++;
5253 if (i == sg_count)
5254 break;
5255 sg_descriptor++;
5256 if (i == max_sg_per_iu) {
5257 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5258 &sg_descriptor->address);
5259 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5260 &sg_descriptor->length);
5261 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5262 *chained = true;
5263 num_sg_in_iu++;
5264 sg_descriptor = io_request->sg_chain_buffer;
5265 }
5266 sg = sg_next(sg);
5267 }
5269 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5271 return num_sg_in_iu;
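/*
 * When a command needs more descriptors than fit in the IU, the last
 * in-IU slot is rewritten as a CISS_SG_CHAIN descriptor that points at
 * this request's preallocated chain buffer and the list continues there;
 * CISS_SG_LAST always marks the final descriptor.
 */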
5274 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5275 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5276 struct pqi_io_request *io_request)
5278 u16 iu_length;
5279 int sg_count;
5280 bool chained;
5281 unsigned int num_sg_in_iu;
5282 struct scatterlist *sg;
5283 struct pqi_sg_descriptor *sg_descriptor;
5285 sg_count = scsi_dma_map(scmd);
5286 if (sg_count < 0)
5287 return sg_count;
5289 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5290 PQI_REQUEST_HEADER_LENGTH;
5292 if (sg_count == 0)
5293 goto out;
5295 sg = scsi_sglist(scmd);
5296 sg_descriptor = request->sg_descriptors;
5298 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5299 ctrl_info->max_sg_per_iu, &chained);
5301 request->partial = chained;
5302 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5304 out:
5305 put_unaligned_le16(iu_length, &request->header.iu_length);
5307 return 0;
5310 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5311 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5312 struct pqi_io_request *io_request)
5314 u16 iu_length;
5315 int sg_count;
5316 bool chained;
5317 unsigned int num_sg_in_iu;
5318 struct scatterlist *sg;
5319 struct pqi_sg_descriptor *sg_descriptor;
5321 sg_count = scsi_dma_map(scmd);
5322 if (sg_count < 0)
5323 return sg_count;
5325 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5326 PQI_REQUEST_HEADER_LENGTH;
5327 num_sg_in_iu = 0;
5329 if (sg_count == 0)
5330 goto out;
5332 sg = scsi_sglist(scmd);
5333 sg_descriptor = request->sg_descriptors;
5335 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5336 ctrl_info->max_sg_per_iu, &chained);
5338 request->partial = chained;
5339 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5341 out:
5342 put_unaligned_le16(iu_length, &request->header.iu_length);
5343 request->num_sg_descriptors = num_sg_in_iu;
5345 return 0;
5348 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5349 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5350 struct pqi_io_request *io_request)
5352 u16 iu_length;
5353 int sg_count;
5354 bool chained;
5355 unsigned int num_sg_in_iu;
5356 struct scatterlist *sg;
5357 struct pqi_sg_descriptor *sg_descriptor;
5359 sg_count = scsi_dma_map(scmd);
5360 if (sg_count < 0)
5361 return sg_count;
5363 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5364 PQI_REQUEST_HEADER_LENGTH;
5365 num_sg_in_iu = 0;
5367 if (sg_count != 0) {
5368 sg = scsi_sglist(scmd);
5369 sg_descriptor = request->sg_descriptors;
5371 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5372 ctrl_info->max_sg_per_r56_iu, &chained);
5374 request->partial = chained;
5375 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5376 }
5378 put_unaligned_le16(iu_length, &request->header.iu_length);
5379 request->num_sg_descriptors = num_sg_in_iu;
5381 return 0;
5384 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5385 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5386 struct pqi_io_request *io_request)
5388 u16 iu_length;
5389 int sg_count;
5390 bool chained;
5391 unsigned int num_sg_in_iu;
5392 struct scatterlist *sg;
5393 struct pqi_sg_descriptor *sg_descriptor;
5395 sg_count = scsi_dma_map(scmd);
5396 if (sg_count < 0)
5397 return sg_count;
5399 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5400 PQI_REQUEST_HEADER_LENGTH;
5401 num_sg_in_iu = 0;
5403 if (sg_count == 0)
5404 goto out;
5406 sg = scsi_sglist(scmd);
5407 sg_descriptor = request->sg_descriptors;
5409 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5410 ctrl_info->max_sg_per_iu, &chained);
5412 request->partial = chained;
5413 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5415 out:
5416 put_unaligned_le16(iu_length, &request->header.iu_length);
5417 request->num_sg_descriptors = num_sg_in_iu;
5419 return 0;
5422 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5423 void *context)
5425 struct scsi_cmnd *scmd;
5427 scmd = io_request->scmd;
5428 pqi_free_io_request(io_request);
5429 scsi_dma_unmap(scmd);
5430 pqi_scsi_done(scmd);
5433 static int pqi_raid_submit_scsi_cmd_with_io_request(
5434 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
5435 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5436 struct pqi_queue_group *queue_group)
5438 int rc;
5439 size_t cdb_length;
5440 struct pqi_raid_path_request *request;
5442 io_request->io_complete_callback = pqi_raid_io_complete;
5443 io_request->scmd = scmd;
5445 request = io_request->iu;
5446 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5448 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5449 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5450 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5451 put_unaligned_le16(io_request->index, &request->request_id);
5452 request->error_index = request->request_id;
5453 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5455 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5456 memcpy(request->cdb, scmd->cmnd, cdb_length);
5458 switch (cdb_length) {
5459 case 6:
5460 case 10:
5461 case 12:
5462 case 16:
5463 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5464 break;
5465 case 20:
5466 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5467 break;
5468 case 24:
5469 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5470 break;
5471 case 28:
5472 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5473 break;
5474 case 32:
5475 default:
5476 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5477 break;
5478 }
5480 switch (scmd->sc_data_direction) {
5481 case DMA_TO_DEVICE:
5482 request->data_direction = SOP_READ_FLAG;
5483 break;
5484 case DMA_FROM_DEVICE:
5485 request->data_direction = SOP_WRITE_FLAG;
5486 break;
5487 case DMA_NONE:
5488 request->data_direction = SOP_NO_DIRECTION_FLAG;
5489 break;
5490 case DMA_BIDIRECTIONAL:
5491 request->data_direction = SOP_BIDIRECTIONAL;
5492 break;
5493 default:
5494 dev_err(&ctrl_info->pci_dev->dev,
5495 "unknown data direction: %d\n",
5496 scmd->sc_data_direction);
5497 break;
5498 }
5500 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5501 if (rc) {
5502 pqi_free_io_request(io_request);
5503 return SCSI_MLQUEUE_HOST_BUSY;
5504 }
5506 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5508 return 0;
5511 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5512 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5513 struct pqi_queue_group *queue_group)
5515 struct pqi_io_request *io_request;
5517 io_request = pqi_alloc_io_request(ctrl_info);
5519 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5520 device, scmd, queue_group);
5523 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5525 struct scsi_cmnd *scmd;
5526 struct pqi_scsi_dev *device;
5527 struct pqi_ctrl_info *ctrl_info;
5529 if (!io_request->raid_bypass)
5530 return false;
5532 scmd = io_request->scmd;
5533 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5534 return false;
5535 if (host_byte(scmd->result) == DID_NO_CONNECT)
5536 return false;
5538 device = scmd->device->hostdata;
5539 if (pqi_device_offline(device) || pqi_device_in_remove(device))
5540 return false;
5542 ctrl_info = shost_to_hba(scmd->device->host);
5543 if (pqi_ctrl_offline(ctrl_info))
5544 return false;
5546 return true;
5549 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5550 void *context)
5552 struct scsi_cmnd *scmd;
5554 scmd = io_request->scmd;
5555 scsi_dma_unmap(scmd);
5556 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5557 set_host_byte(scmd, DID_IMM_RETRY);
5558 scmd->SCp.this_residual++;
5559 }
5561 pqi_free_io_request(io_request);
5562 pqi_scsi_done(scmd);
5565 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5566 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5567 struct pqi_queue_group *queue_group)
5569 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5570 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
5573 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5574 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5575 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5576 struct pqi_encryption_info *encryption_info, bool raid_bypass)
5578 int rc;
5579 struct pqi_io_request *io_request;
5580 struct pqi_aio_path_request *request;
5582 io_request = pqi_alloc_io_request(ctrl_info);
5583 io_request->io_complete_callback = pqi_aio_io_complete;
5584 io_request->scmd = scmd;
5585 io_request->raid_bypass = raid_bypass;
5587 request = io_request->iu;
5588 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5590 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5591 put_unaligned_le32(aio_handle, &request->nexus_id);
5592 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5593 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5594 put_unaligned_le16(io_request->index, &request->request_id);
5595 request->error_index = request->request_id;
5596 if (cdb_length > sizeof(request->cdb))
5597 cdb_length = sizeof(request->cdb);
5598 request->cdb_length = cdb_length;
5599 memcpy(request->cdb, cdb, cdb_length);
5601 switch (scmd->sc_data_direction) {
5602 case DMA_TO_DEVICE:
5603 request->data_direction = SOP_READ_FLAG;
5604 break;
5605 case DMA_FROM_DEVICE:
5606 request->data_direction = SOP_WRITE_FLAG;
5607 break;
5608 case DMA_NONE:
5609 request->data_direction = SOP_NO_DIRECTION_FLAG;
5610 break;
5611 case DMA_BIDIRECTIONAL:
5612 request->data_direction = SOP_BIDIRECTIONAL;
5613 break;
5614 default:
5615 dev_err(&ctrl_info->pci_dev->dev,
5616 "unknown data direction: %d\n",
5617 scmd->sc_data_direction);
5618 break;
5619 }
5621 if (encryption_info) {
5622 request->encryption_enable = true;
5623 put_unaligned_le16(encryption_info->data_encryption_key_index,
5624 &request->data_encryption_key_index);
5625 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5626 &request->encrypt_tweak_lower);
5627 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5628 &request->encrypt_tweak_upper);
5631 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5632 if (rc) {
5633 pqi_free_io_request(io_request);
5634 return SCSI_MLQUEUE_HOST_BUSY;
5635 }
5637 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5639 return 0;
5642 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5643 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5644 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5645 struct pqi_scsi_dev_raid_map_data *rmd)
5647 int rc;
5648 struct pqi_io_request *io_request;
5649 struct pqi_aio_r1_path_request *r1_request;
5651 io_request = pqi_alloc_io_request(ctrl_info);
5652 io_request->io_complete_callback = pqi_aio_io_complete;
5653 io_request->scmd = scmd;
5654 io_request->raid_bypass = true;
5656 r1_request = io_request->iu;
5657 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5659 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5660 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5661 r1_request->num_drives = rmd->num_it_nexus_entries;
5662 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5663 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5664 if (rmd->num_it_nexus_entries == 3)
5665 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5667 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5668 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5669 put_unaligned_le16(io_request->index, &r1_request->request_id);
5670 r1_request->error_index = r1_request->request_id;
5671 if (rmd->cdb_length > sizeof(r1_request->cdb))
5672 rmd->cdb_length = sizeof(r1_request->cdb);
5673 r1_request->cdb_length = rmd->cdb_length;
5674 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5676 /* The direction is always write; SOP_READ_FLAG is named from the controller's perspective (it reads from host memory). */
5677 r1_request->data_direction = SOP_READ_FLAG;
5679 if (encryption_info) {
5680 r1_request->encryption_enable = true;
5681 put_unaligned_le16(encryption_info->data_encryption_key_index,
5682 &r1_request->data_encryption_key_index);
5683 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5684 &r1_request->encrypt_tweak_lower);
5685 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5686 &r1_request->encrypt_tweak_upper);
5689 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5690 if (rc) {
5691 pqi_free_io_request(io_request);
5692 return SCSI_MLQUEUE_HOST_BUSY;
5693 }
5695 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5697 return 0;
5700 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5701 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5702 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5703 struct pqi_scsi_dev_raid_map_data *rmd)
5705 int rc;
5706 struct pqi_io_request *io_request;
5707 struct pqi_aio_r56_path_request *r56_request;
5709 io_request = pqi_alloc_io_request(ctrl_info);
5710 io_request->io_complete_callback = pqi_aio_io_complete;
5711 io_request->scmd = scmd;
5712 io_request->raid_bypass = true;
5714 r56_request = io_request->iu;
5715 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5717 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5718 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5719 else
5720 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5722 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5723 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5724 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5725 if (rmd->raid_level == SA_RAID_6) {
5726 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5727 r56_request->xor_multiplier = rmd->xor_mult;
5728 }
5729 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5730 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5731 put_unaligned_le64(rmd->row, &r56_request->row);
5733 put_unaligned_le16(io_request->index, &r56_request->request_id);
5734 r56_request->error_index = r56_request->request_id;
5736 if (rmd->cdb_length > sizeof(r56_request->cdb))
5737 rmd->cdb_length = sizeof(r56_request->cdb);
5738 r56_request->cdb_length = rmd->cdb_length;
5739 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5741 /* The direction is always write; SOP_READ_FLAG is named from the controller's perspective (it reads from host memory). */
5742 r56_request->data_direction = SOP_READ_FLAG;
5744 if (encryption_info) {
5745 r56_request->encryption_enable = true;
5746 put_unaligned_le16(encryption_info->data_encryption_key_index,
5747 &r56_request->data_encryption_key_index);
5748 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5749 &r56_request->encrypt_tweak_lower);
5750 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5751 &r56_request->encrypt_tweak_upper);
5754 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5755 if (rc) {
5756 pqi_free_io_request(io_request);
5757 return SCSI_MLQUEUE_HOST_BUSY;
5758 }
5760 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5762 return 0;
5765 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5766 struct scsi_cmnd *scmd)
5768 u16 hw_queue;
5770 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5771 if (hw_queue > ctrl_info->max_hw_queue_index)
5772 hw_queue = 0;
5774 return hw_queue;
5777 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5779 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5780 return false;
5782 return scmd->SCp.this_residual == 0;
5786 * This function gets called just before we hand the completed SCSI request
5787 * back to the SML.
5790 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5792 struct pqi_scsi_dev *device;
5794 if (!scmd->device) {
5795 set_host_byte(scmd, DID_NO_CONNECT);
5796 return;
5797 }
5799 device = scmd->device->hostdata;
5800 if (!device) {
5801 set_host_byte(scmd, DID_NO_CONNECT);
5802 return;
5803 }
5805 atomic_dec(&device->scsi_cmds_outstanding);
5808 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5809 struct scsi_cmnd *scmd)
5811 u32 oldest_jiffies;
5812 u8 lru_index;
5813 int i;
5814 int rc;
5815 struct pqi_scsi_dev *device;
5816 struct pqi_stream_data *pqi_stream_data;
5817 struct pqi_scsi_dev_raid_map_data rmd;
5819 if (!ctrl_info->enable_stream_detection)
5820 return false;
5822 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5823 if (rc)
5824 return false;
5826 /* Check writes only. */
5827 if (!rmd.is_write)
5828 return false;
5830 device = scmd->device->hostdata;
5832 /* Check for RAID 5/6 streams. */
5833 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5834 return false;
5837 * If controller does not support AIO RAID{5,6} writes, need to send
5838 * requests down non-AIO path.
5840 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5841 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5842 return true;
5844 lru_index = 0;
5845 oldest_jiffies = INT_MAX;
5846 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5847 pqi_stream_data = &device->stream_data[i];
5849 * Check whether the request is adjacent to, or falls within,
5850 * the previous request.
5852 if ((pqi_stream_data->next_lba &&
5853 rmd.first_block >= pqi_stream_data->next_lba) &&
5854 rmd.first_block <= pqi_stream_data->next_lba +
5855 rmd.block_cnt) {
5856 pqi_stream_data->next_lba = rmd.first_block +
5857 rmd.block_cnt;
5858 pqi_stream_data->last_accessed = jiffies;
5859 return true;
5860 }
5862 /* unused entry */
5863 if (pqi_stream_data->last_accessed == 0) {
5864 lru_index = i;
5865 break;
5866 }
5868 /* Find entry with oldest last accessed time. */
5869 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5870 oldest_jiffies = pqi_stream_data->last_accessed;
5871 lru_index = i;
5872 }
5873 }
5875 /* Set LRU entry. */
5876 pqi_stream_data = &device->stream_data[lru_index];
5877 pqi_stream_data->last_accessed = jiffies;
5878 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5880 return false;
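/*
 * Stream detection keeps a small per-LUN LRU table of where each recent
 * write stream should resume. A hit means the write looks sequential, so
 * returning true steers it down the RAID path, where the firmware can
 * turn it into full-stripe writes instead of per-request parity updates
 * on the bypass path.
 */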
5883 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5885 int rc;
5886 struct pqi_ctrl_info *ctrl_info;
5887 struct pqi_scsi_dev *device;
5888 u16 hw_queue;
5889 struct pqi_queue_group *queue_group;
5890 bool raid_bypassed;
5892 device = scmd->device->hostdata;
5894 if (!device) {
5895 set_host_byte(scmd, DID_NO_CONNECT);
5896 pqi_scsi_done(scmd);
5897 return 0;
5898 }
5900 atomic_inc(&device->scsi_cmds_outstanding);
5902 ctrl_info = shost_to_hba(shost);
5904 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5905 set_host_byte(scmd, DID_NO_CONNECT);
5906 pqi_scsi_done(scmd);
5907 return 0;
5908 }
5910 if (pqi_ctrl_blocked(ctrl_info)) {
5911 rc = SCSI_MLQUEUE_HOST_BUSY;
5912 goto out;
5913 }
5916 * This is necessary because the SML doesn't zero out this field during
5917 * error recovery.
5919 scmd->result = 0;
5921 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5922 queue_group = &ctrl_info->queue_groups[hw_queue];
5924 if (pqi_is_logical_device(device)) {
5925 raid_bypassed = false;
5926 if (device->raid_bypass_enabled &&
5927 pqi_is_bypass_eligible_request(scmd) &&
5928 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5929 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5930 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5931 raid_bypassed = true;
5932 atomic_inc(&device->raid_bypass_cnt);
5933 }
5934 }
5935 if (!raid_bypassed)
5936 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5937 } else {
5938 if (device->aio_enabled)
5939 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5940 else
5941 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5942 }
5944 out:
5945 if (rc)
5946 atomic_dec(&device->scsi_cmds_outstanding);
5948 return rc;
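/*
 * Dispatch summary: logical volumes try the RAID bypass (AIO) path first
 * when eligible and fall back to the RAID path; physical devices go
 * straight to AIO when enabled. A nonzero rc means the command was never
 * queued, so the outstanding-command count is rolled back above.
 */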
5951 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
5953 unsigned int i;
5954 unsigned int path;
5955 unsigned long flags;
5956 unsigned int queued_io_count;
5957 struct pqi_queue_group *queue_group;
5958 struct pqi_io_request *io_request;
5960 queued_io_count = 0;
5962 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5963 queue_group = &ctrl_info->queue_groups[i];
5964 for (path = 0; path < 2; path++) {
5965 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
5966 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
5967 queued_io_count++;
5968 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
5972 return queued_io_count;
5975 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
5977 unsigned int i;
5978 unsigned int path;
5979 unsigned int nonempty_inbound_queue_count;
5980 struct pqi_queue_group *queue_group;
5981 pqi_index_t iq_pi;
5982 pqi_index_t iq_ci;
5984 nonempty_inbound_queue_count = 0;
5986 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5987 queue_group = &ctrl_info->queue_groups[i];
5988 for (path = 0; path < 2; path++) {
5989 iq_pi = queue_group->iq_pi_copy[path];
5990 iq_ci = readl(queue_group->iq_ci[path]);
5991 if (iq_ci != iq_pi)
5992 nonempty_inbound_queue_count++;
5996 return nonempty_inbound_queue_count;
5999 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6001 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6003 unsigned long start_jiffies;
6004 unsigned long warning_timeout;
6005 unsigned int queued_io_count;
6006 unsigned int nonempty_inbound_queue_count;
6007 bool displayed_warning;
6009 displayed_warning = false;
6010 start_jiffies = jiffies;
6011 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
6013 while (1) {
6014 queued_io_count = pqi_queued_io_count(ctrl_info);
6015 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6016 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6017 break;
6018 pqi_check_ctrl_health(ctrl_info);
6019 if (pqi_ctrl_offline(ctrl_info))
6020 return -ENXIO;
6021 if (time_after(jiffies, warning_timeout)) {
6022 dev_warn(&ctrl_info->pci_dev->dev,
6023 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6024 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6025 displayed_warning = true;
6026 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
6028 usleep_range(1000, 2000);
6029 }
6031 if (displayed_warning)
6032 dev_warn(&ctrl_info->pci_dev->dev,
6033 "queued I/O drained after waiting for %u seconds\n",
6034 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6036 return 0;
6039 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6040 struct pqi_scsi_dev *device)
6042 unsigned int i;
6043 unsigned int path;
6044 struct pqi_queue_group *queue_group;
6045 unsigned long flags;
6046 struct pqi_io_request *io_request;
6047 struct pqi_io_request *next;
6048 struct scsi_cmnd *scmd;
6049 struct pqi_scsi_dev *scsi_device;
6051 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6052 queue_group = &ctrl_info->queue_groups[i];
6054 for (path = 0; path < 2; path++) {
6055 spin_lock_irqsave(
6056 &queue_group->submit_lock[path], flags);
6058 list_for_each_entry_safe(io_request, next,
6059 &queue_group->request_list[path],
6060 request_list_entry) {
6062 scmd = io_request->scmd;
6063 if (!scmd)
6064 continue;
6066 scsi_device = scmd->device->hostdata;
6067 if (scsi_device != device)
6068 continue;
6070 list_del(&io_request->request_list_entry);
6071 set_host_byte(scmd, DID_RESET);
6072 pqi_free_io_request(io_request);
6073 scsi_dma_unmap(scmd);
6074 pqi_scsi_done(scmd);
6077 spin_unlock_irqrestore(
6078 &queue_group->submit_lock[path], flags);
6083 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
6085 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6086 struct pqi_scsi_dev *device, unsigned long timeout_msecs)
6088 int cmds_outstanding;
6089 unsigned long start_jiffies;
6090 unsigned long warning_timeout;
6091 unsigned long msecs_waiting;
6093 start_jiffies = jiffies;
6094 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
6096 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
6097 pqi_check_ctrl_health(ctrl_info);
6098 if (pqi_ctrl_offline(ctrl_info))
6099 return -ENXIO;
6100 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6101 if (msecs_waiting >= timeout_msecs) {
6102 dev_err(&ctrl_info->pci_dev->dev,
6103 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6104 ctrl_info->scsi_host->host_no, device->bus, device->target,
6105 device->lun, msecs_waiting / 1000, cmds_outstanding);
6106 return -ETIMEDOUT;
6107 }
6108 if (time_after(jiffies, warning_timeout)) {
6109 dev_warn(&ctrl_info->pci_dev->dev,
6110 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6111 ctrl_info->scsi_host->host_no, device->bus, device->target,
6112 device->lun, msecs_waiting / 1000, cmds_outstanding);
6113 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
6115 usleep_range(1000, 2000);
6116 }
6118 return 0;
6121 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6122 void *context)
6124 struct completion *waiting = context;
6126 complete(waiting);
6129 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
6131 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6132 struct pqi_scsi_dev *device, struct completion *wait)
6134 int rc;
6135 unsigned int wait_secs;
6136 int cmds_outstanding;
6138 wait_secs = 0;
6140 while (1) {
6141 if (wait_for_completion_io_timeout(wait,
6142 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
6143 rc = 0;
6144 break;
6145 }
6147 pqi_check_ctrl_health(ctrl_info);
6148 if (pqi_ctrl_offline(ctrl_info)) {
6149 rc = -ENXIO;
6150 break;
6151 }
6153 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6154 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
6155 dev_warn(&ctrl_info->pci_dev->dev,
6156 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6157 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
6158 }
6160 return rc;
6163 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6165 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6167 int rc;
6168 struct pqi_io_request *io_request;
6169 DECLARE_COMPLETION_ONSTACK(wait);
6170 struct pqi_task_management_request *request;
6172 io_request = pqi_alloc_io_request(ctrl_info);
6173 io_request->io_complete_callback = pqi_lun_reset_complete;
6174 io_request->context = &wait;
6176 request = io_request->iu;
6177 memset(request, 0, sizeof(*request));
6179 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6180 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6181 &request->header.iu_length);
6182 put_unaligned_le16(io_request->index, &request->request_id);
6183 memcpy(request->lun_number, device->scsi3addr,
6184 sizeof(request->lun_number));
6185 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6186 if (ctrl_info->tmf_iu_timeout_supported)
6187 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6189 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6190 io_request);
6192 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
6193 if (rc == 0)
6194 rc = io_request->status;
6196 pqi_free_io_request(io_request);
6198 return rc;
6201 #define PQI_LUN_RESET_RETRIES 3
6202 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6203 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6204 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
6206 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6208 int reset_rc;
6209 int wait_rc;
6210 unsigned int retries;
6211 unsigned long timeout_msecs;
6213 for (retries = 0;;) {
6214 reset_rc = pqi_lun_reset(ctrl_info, device);
6215 if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
6216 break;
6217 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6218 }
6220 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6221 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6223 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
6224 if (wait_rc && reset_rc == 0)
6225 reset_rc = wait_rc;
6227 return reset_rc == 0 ? SUCCESS : FAILED;
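/*
 * Reset policy: retry the LUN reset up to PQI_LUN_RESET_RETRIES times,
 * 10 seconds apart, then wait for outstanding I/O to drain: 10 minutes
 * after a successful reset but only 2 minutes after a failed one.
 */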
6230 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
6231 struct pqi_scsi_dev *device)
6233 int rc;
6235 pqi_ctrl_block_requests(ctrl_info);
6236 pqi_ctrl_wait_until_quiesced(ctrl_info);
6237 pqi_fail_io_queued_for_device(ctrl_info, device);
6238 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6239 if (rc)
6240 rc = FAILED;
6241 else
6242 rc = pqi_lun_reset_with_retries(ctrl_info, device);
6243 pqi_ctrl_unblock_requests(ctrl_info);
6245 return rc;
6248 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6250 int rc;
6251 struct Scsi_Host *shost;
6252 struct pqi_ctrl_info *ctrl_info;
6253 struct pqi_scsi_dev *device;
6255 shost = scmd->device->host;
6256 ctrl_info = shost_to_hba(shost);
6257 device = scmd->device->hostdata;
6259 mutex_lock(&ctrl_info->lun_reset_mutex);
6261 dev_err(&ctrl_info->pci_dev->dev,
6262 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6263 shost->host_no,
6264 device->bus, device->target, device->lun,
6265 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6267 pqi_check_ctrl_health(ctrl_info);
6268 if (pqi_ctrl_offline(ctrl_info))
6269 rc = FAILED;
6270 else
6271 rc = pqi_device_reset(ctrl_info, device);
6273 dev_err(&ctrl_info->pci_dev->dev,
6274 "reset of scsi %d:%d:%d:%d: %s\n",
6275 shost->host_no, device->bus, device->target, device->lun,
6276 rc == SUCCESS ? "SUCCESS" : "FAILED");
6278 mutex_unlock(&ctrl_info->lun_reset_mutex);
6280 return rc;
static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
	unsigned long flags;
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_target *starget;
	struct sas_rphy *rphy;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			if (device->target_lun_valid) {
				device->ignore_device = true;
			} else {
				device->target = sdev_id(sdev);
				device->lun = sdev->lun;
				device->target_lun_valid = true;
			}
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device) {
		sdev->hostdata = device;
		device->sdev = sdev;
		if (device->queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(sdev,
				device->advertised_queue_depth);
		}
		if (pqi_is_logical_device(device)) {
			pqi_disable_write_same(sdev);
		} else {
			sdev->allow_restart = 1;
			if (device->device_type == SA_DEVICE_TYPE_NVME)
				pqi_disable_write_same(sdev);
		}
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}
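/*
 * Let blk-mq derive the hardware queue mapping from the PCI MSI-X vector
 * affinity, so each hardware queue is serviced on the CPUs its interrupt
 * vector is bound to.
 */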
static int pqi_map_queues(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
					ctrl_info->pci_dev, 0);
}

static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
{
	return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
}
static int pqi_slave_configure(struct scsi_device *sdev)
{
	int rc = 0;
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	device->devtype = sdev->type;

	if (pqi_is_tape_changer_device(device) && device->ignore_device) {
		rc = -ENXIO;
		device->ignore_device = false;
	}

	return rc;
}
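/*
 * The ioctls below implement the legacy cciss ioctl interface
 * (<linux/cciss_ioctl.h>) so existing RAID management tools keep
 * working. board_id packs the PCI subsystem IDs as
 * (subsystem_device << 16) | subsystem_vendor.
 */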
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	struct pci_dev *pci_dev;
	u32 subsystem_vendor;
	u32 subsystem_device;
	cciss_pci_info_struct pciinfo;

	if (!arg)
		return -EINVAL;

	pci_dev = ctrl_info->pci_dev;

	pciinfo.domain = pci_domain_nr(pci_dev->bus);
	pciinfo.bus = pci_dev->bus->number;
	pciinfo.dev_fn = pci_dev->devfn;
	subsystem_vendor = pci_dev->subsystem_vendor;
	subsystem_device = pci_dev->subsystem_device;
	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;

	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;

	return 0;
}
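/*
 * The version reported to CCISS_GETDRIVVER packs major into bits 31:28,
 * minor into 27:24, release into 23:16 and revision into 15:0. For this
 * driver (2.1.12-055) that is (2 << 28) | (1 << 24) | (12 << 16) | 55 =
 * 0x210c0037.
 */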
static int pqi_getdrivver_ioctl(void __user *arg)
{
	u32 version;

	if (!arg)
		return -EINVAL;

	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
		(DRIVER_RELEASE << 16) | DRIVER_REVISION;

	if (copy_to_user(arg, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}
struct ciss_error_info {
	u8 scsi_status;
	int command_status;
	size_t sense_data_length;
};
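/*
 * Translate a PQI error descriptor into the CISS status codes that the
 * cciss passthrough interface expects: data-path results map onto
 * underrun/overrun/protocol/hardware errors, and anything unrecognized
 * falls back to CISS_CMD_STATUS_TARGET_STATUS with the raw SCSI status
 * passed through.
 */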
static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{
	int ciss_cmd_status;
	size_t sense_data_length;

	switch (pqi_error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
		break;
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
		break;
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
		break;
	default:
		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
		break;
	}

	sense_data_length =
		get_unaligned_le16(&pqi_error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&pqi_error_info->response_data_length);
	if (sense_data_length)
		if (sense_data_length > sizeof(pqi_error_info->data))
			sense_data_length = sizeof(pqi_error_info->data);

	ciss_error_info->scsi_status = pqi_error_info->status;
	ciss_error_info->command_status = ciss_cmd_status;
	ciss_error_info->sense_data_length = sense_data_length;
}
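/*
 * CCISS_PASSTHRU: validate the user's request, bounce the data buffer
 * through a kernel allocation, wrap the CDB in a RAID path IU, submit it
 * synchronously, then convert any error info to CISS form and copy the
 * results (and, for reads, the data) back out to user space.
 */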
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
	IOCTL_Command_struct iocommand;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info pqi_error_info;
	struct ciss_error_info ciss_error_info;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
		return -EBUSY;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
	case XFER_READ | XFER_WRITE:
		break;
	default:
		return -EINVAL;
	}

	if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	case XFER_READ | XFER_WRITE:
		request.data_direction = SOP_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;

	if (iocommand.buf_size > 0) {
		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}

	put_unaligned_le16(iu_length, &request.header.iu_length);

	if (ctrl_info->raid_iu_timeout_supported)
		put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);

	if (iocommand.buf_size > 0)
		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
			DMA_BIDIRECTIONAL);

	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));

	if (rc == 0) {
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
		iocommand.error_info.CommandStatus =
			ciss_error_info.command_status;
		sense_data_length = ciss_error_info.sense_data_length;
		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}

	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}

	if (rc == 0 && iocommand.buf_size > 0 &&
		(iocommand.Request.Type.Direction & XFER_READ)) {
		if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
	void __user *arg)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		rc = pqi_scan_scsi_devices(ctrl_info);
		break;
	case CCISS_GETPCIINFO:
		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
		break;
	case CCISS_GETDRIVVER:
		rc = pqi_getdrivver_ioctl(arg);
		break;
	case CCISS_PASSTHRU:
		rc = pqi_passthru_ioctl(ctrl_info, arg);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
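/*
 * Host-level sysfs attributes, visible under
 * /sys/class/scsi_host/host<N>/. lockup_action selects what the driver
 * does when it declares the controller offline (none, reboot, or panic),
 * e.g. "echo panic > /sys/class/scsi_host/host0/lockup_action".
 */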
static ssize_t pqi_firmware_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
}

static ssize_t pqi_driver_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
}

static ssize_t pqi_serial_number_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
}

static ssize_t pqi_model_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
}

static ssize_t pqi_vendor_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
}
static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	pqi_scan_start(shost);

	return count;
}
static ssize_t pqi_lockup_action_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	int count = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (pqi_lockup_actions[i].action == pqi_lockup_action)
			count += scnprintf(buffer + count, PAGE_SIZE - count,
				"[%s] ", pqi_lockup_actions[i].name);
		else
			count += scnprintf(buffer + count, PAGE_SIZE - count,
				"%s ", pqi_lockup_actions[i].name);
	}

	count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");

	return count;
}
static ssize_t pqi_lockup_action_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	unsigned int i;
	char *action_name;
	char action_name_buffer[32];

	strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
	action_name = strstrip(action_name_buffer);

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return count;
		}
	}

	return -EINVAL;
}
static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, 10, "%x\n",
			ctrl_info->enable_stream_detection);
}

static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
	u8 set_stream_detection = 0;

	if (kstrtou8(buffer, 0, &set_stream_detection))
		return -EINVAL;

	if (set_stream_detection > 0)
		set_stream_detection = 1;

	ctrl_info->enable_stream_detection = set_stream_detection;

	return count;
}
static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
}

static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
	u8 set_r5_writes = 0;

	if (kstrtou8(buffer, 0, &set_r5_writes))
		return -EINVAL;

	if (set_r5_writes > 0)
		set_r5_writes = 1;

	ctrl_info->enable_r5_writes = set_r5_writes;

	return count;
}

static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
}

static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
	u8 set_r6_writes = 0;

	if (kstrtou8(buffer, 0, &set_r6_writes))
		return -EINVAL;

	if (set_r6_writes > 0)
		set_r6_writes = 1;

	ctrl_info->enable_r6_writes = set_r6_writes;

	return count;
}
static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
	pqi_lockup_action_store);
static DEVICE_ATTR(enable_stream_detection, 0644,
	pqi_host_enable_stream_detection_show,
	pqi_host_enable_stream_detection_store);
static DEVICE_ATTR(enable_r5_writes, 0644,
	pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
static DEVICE_ATTR(enable_r6_writes, 0644,
	pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);

static struct attribute *pqi_shost_attrs[] = {
	&dev_attr_driver_version.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_model.attr,
	&dev_attr_serial_number.attr,
	&dev_attr_vendor.attr,
	&dev_attr_rescan.attr,
	&dev_attr_lockup_action.attr,
	&dev_attr_enable_stream_detection.attr,
	&dev_attr_enable_r5_writes.attr,
	&dev_attr_enable_r6_writes.attr,
	NULL
};

ATTRIBUTE_GROUPS(pqi_shost);
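/*
 * Per-device sysfs attributes. Each show routine takes
 * scsi_device_list_lock before dereferencing sdev->hostdata because the
 * device may be torn down by a concurrent rescan; -ENODEV is returned if
 * it has already gone away.
 */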
static ssize_t pqi_unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u8 unique_id[16];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	if (device->is_physical_device)
		memcpy(unique_id, device->wwid, sizeof(device->wwid));
	else
		memcpy(unique_id, device->volume_id, sizeof(device->volume_id));

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE,
		"%02X%02X%02X%02X%02X%02X%02X%02X"
		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
		unique_id[0], unique_id[1], unique_id[2], unique_id[3],
		unique_id[4], unique_id[5], unique_id[6], unique_id[7],
		unique_id[8], unique_id[9], unique_id[10], unique_id[11],
		unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u8 lunid[8];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	memcpy(lunid, device->scsi3addr, sizeof(lunid));

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
#define MAX_PATHS	8

static ssize_t pqi_path_info_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index;
	char *active;
	u8 phys_connector[2];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	bay = device->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1 << i;
		if (i == device->active_path_index)
			active = "Active";
		else if (device->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"[%d:%d:%d:%d] %20.20s ",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun,
					scsi_device_type(device->devtype));

		if (device->devtype == TYPE_RAID ||
			pqi_is_logical_device(device))
			goto end_buffer;

		memcpy(&phys_connector, &device->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';

		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"PORT: %.2s ", phys_connector);

		box = device->box[i];
		if (box != 0 && box != 0xFF)
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"BOX: %hhu ", box);

		if ((device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC) &&
			pqi_expose_device(device))
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"BAY: %hhu ", bay);

end_buffer:
		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"%s\n", active);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return output_len;
}
static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device || !pqi_is_device_with_sas_address(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}
static ssize_t pqi_raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	char *raid_level;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	if (pqi_is_logical_device(device))
		raid_level = pqi_raid_level_to_string(device->raid_level);
	else
		raid_level = "N/A";

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}
static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	int raid_bypass_cnt;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
}
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);

static struct attribute *pqi_sdev_attrs[] = {
	&dev_attr_lunid.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_path_info.attr,
	&dev_attr_sas_address.attr,
	&dev_attr_ssd_smart_path_enabled.attr,
	&dev_attr_raid_level.attr,
	&dev_attr_raid_bypass_cnt.attr,
	NULL
};

ATTRIBUTE_GROUPS(pqi_sdev);
static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.slave_configure = pqi_slave_configure,
	.map_queues = pqi_map_queues,
	.sdev_groups = pqi_sdev_groups,
	.shost_groups = pqi_shost_groups,
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	shost->io_port = 0;
	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	shost->host_tagset = 1;
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
		goto remove_host;
	}

	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);
free_host:
	scsi_host_put(shost);

	return rc;
}
static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	pqi_delete_sas_host(ctrl_info);

	shost = ctrl_info->scsi_host;
	if (!shost)
		return;

	scsi_remove_host(shost);
	scsi_host_put(shost);
}
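/*
 * Per the PQI spec, the max_reset_timeout register is expressed in units
 * of 100 milliseconds, hence the readw() * 100 conversion to msecs below.
 */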
static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	unsigned int timeout_msecs;
	union pqi_reset_register reset_reg;

	pqi_registers = ctrl_info->pqi_registers;
	timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;

	while (1) {
		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
		reset_reg.all_bits = readl(&pqi_registers->device_reset);
		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
			break;
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
		if (time_after(jiffies, timeout)) {
			rc = -ETIMEDOUT;
			break;
		}
	}

	return rc;
}
static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	union pqi_reset_register reset_reg;

	if (ctrl_info->pqi_reset_quiesce_supported) {
		rc = sis_pqi_reset_quiesce(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"PQI reset failed during quiesce with error %d\n", rc);
			return rc;
		}
	}

	reset_reg.all_bits = 0;
	reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
	reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

	writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);

	rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"PQI reset failed with error %d\n", rc);

	return rc;
}
static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_sense_subsystem_info *sense_info;

	sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
	if (!sense_info)
		return -ENOMEM;

	rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
	if (rc)
		goto out;

	memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
		sizeof(sense_info->ctrl_serial_number));
	ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';

out:
	kfree(sense_info);

	return rc;
}
static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_identify_controller *identify;

	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
	if (!identify)
		return -ENOMEM;

	rc = pqi_identify_controller(ctrl_info, identify);
	if (rc)
		goto out;

	if (get_unaligned_le32(&identify->extra_controller_flags) &
		BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
		memcpy(ctrl_info->firmware_version,
			identify->firmware_version_long,
			sizeof(identify->firmware_version_long));
	} else {
		memcpy(ctrl_info->firmware_version,
			identify->firmware_version_short,
			sizeof(identify->firmware_version_short));
		ctrl_info->firmware_version
			[sizeof(identify->firmware_version_short)] = '\0';
		snprintf(ctrl_info->firmware_version +
			strlen(ctrl_info->firmware_version),
			sizeof(ctrl_info->firmware_version) -
			sizeof(identify->firmware_version_short),
			"-%u",
			get_unaligned_le16(&identify->firmware_build_number));
	}

	memcpy(ctrl_info->model, identify->product_id,
		sizeof(identify->product_id));
	ctrl_info->model[sizeof(identify->product_id)] = '\0';

	memcpy(ctrl_info->vendor, identify->vendor_id,
		sizeof(identify->vendor_id));
	ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';

out:
	kfree(identify);

	return rc;
}
struct pqi_config_table_section_info {
	struct pqi_ctrl_info *ctrl_info;
	void *section;
	u32 section_offset;
	void __iomem *section_iomem_addr;
};
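/*
 * The firmware features section holds three parallel byte arrays of
 * num_elements bytes each, laid out back to back: features supported by
 * the firmware, features requested by the host, and features currently
 * enabled. The helpers below index into those arrays by feature bit
 * position (byte_index = bit / 8, offset by 0, 1 or 2 times
 * num_elements respectively).
 */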
static inline bool pqi_is_firmware_feature_supported(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{
	unsigned int byte_index;

	byte_index = bit_position / BITS_PER_BYTE;

	if (byte_index >= le16_to_cpu(firmware_features->num_elements))
		return false;

	return firmware_features->features_supported[byte_index] &
		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}

static inline bool pqi_is_firmware_feature_enabled(
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *firmware_features_iomem_addr,
	unsigned int bit_position)
{
	unsigned int byte_index;
	u8 __iomem *features_enabled_iomem_addr;

	byte_index = (bit_position / BITS_PER_BYTE) +
		(le16_to_cpu(firmware_features->num_elements) * 2);

	features_enabled_iomem_addr = firmware_features_iomem_addr +
		offsetof(struct pqi_config_table_firmware_features,
			features_supported) + byte_index;

	return *((__force u8 *)features_enabled_iomem_addr) &
		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}

static inline void pqi_request_firmware_feature(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{
	unsigned int byte_index;

	byte_index = (bit_position / BITS_PER_BYTE) +
		le16_to_cpu(firmware_features->num_elements);

	firmware_features->features_supported[byte_index] |=
		(1 << (bit_position % BITS_PER_BYTE));
}
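/*
 * Tell the firmware that the host has updated the given range of config
 * table sections; for feature negotiation this prompts the firmware to
 * re-evaluate the requested-features array written by the host.
 */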
static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
	u16 first_section, u16 last_section)
{
	struct pqi_vendor_general_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
		&request.function_code);
	put_unaligned_le16(first_section,
		&request.data.config_table_update.first_section);
	put_unaligned_le16(last_section,
		&request.data.config_table_update.last_section);

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *firmware_features_iomem_addr)
{
	void *features_requested;
	void __iomem *features_requested_iomem_addr;
	void __iomem *host_max_known_feature_iomem_addr;

	features_requested = firmware_features->features_supported +
		le16_to_cpu(firmware_features->num_elements);

	features_requested_iomem_addr = firmware_features_iomem_addr +
		(features_requested - (void *)firmware_features);

	memcpy_toio(features_requested_iomem_addr, features_requested,
		le16_to_cpu(firmware_features->num_elements));

	if (pqi_is_firmware_feature_supported(firmware_features,
		PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
		host_max_known_feature_iomem_addr =
			features_requested_iomem_addr +
			(le16_to_cpu(firmware_features->num_elements) * 2) +
			sizeof(__le16);
		writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
			host_max_known_feature_iomem_addr);
	}

	return pqi_config_table_update(ctrl_info,
		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
}
struct pqi_firmware_feature {
	char *feature_name;
	unsigned int feature_bit;
	bool supported;
	bool enabled;
	void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
		struct pqi_firmware_feature *firmware_feature);
};

static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	if (!firmware_feature->supported) {
		dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
			firmware_feature->feature_name);
		return;
	}

	if (firmware_feature->enabled) {
		dev_info(&ctrl_info->pci_dev->dev,
			"%s enabled\n", firmware_feature->feature_name);
		return;
	}

	dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
		firmware_feature->feature_name);
}
static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	switch (firmware_feature->feature_bit) {
	case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
		ctrl_info->enable_r1_writes = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
		ctrl_info->enable_r5_writes = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
		ctrl_info->enable_r6_writes = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
		ctrl_info->soft_reset_handshake_supported =
			firmware_feature->enabled &&
			pqi_read_soft_reset_status(ctrl_info);
		break;
	case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
		ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
		ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
		ctrl_info->unique_wwid_in_report_phys_lun_supported =
			firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
		ctrl_info->firmware_triage_supported = firmware_feature->enabled;
		pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
		break;
	case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
		ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
		break;
	}

	pqi_firmware_feature_status(ctrl_info, firmware_feature);
}

static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	if (firmware_feature->feature_status)
		firmware_feature->feature_status(ctrl_info, firmware_feature);
}
static DEFINE_MUTEX(pqi_firmware_features_mutex);

static struct pqi_firmware_feature pqi_firmware_features[] = {
	{
		.feature_name = "Online Firmware Activation",
		.feature_bit = PQI_FIRMWARE_FEATURE_OFA,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "Serial Management Protocol",
		.feature_bit = PQI_FIRMWARE_FEATURE_SMP,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "Maximum Known Feature",
		.feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 0 Read Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 1 Read Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 5 Read Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 6 Read Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 0 Write Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 1 Write Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID 5 Write Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID 6 Write Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "New Soft Reset Handshake",
		.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID IU Timeout",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "TMF IU Timeout",
		.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "Unique WWID in Report Physical LUN",
		.feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "Firmware Triage",
		.feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RPL Extended Formats 4 and 5",
		.feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
};
static void pqi_process_firmware_features(
	struct pqi_config_table_section_info *section_info)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_config_table_firmware_features *firmware_features;
	void __iomem *firmware_features_iomem_addr;
	unsigned int i;
	unsigned int num_features_supported;

	ctrl_info = section_info->ctrl_info;
	firmware_features = section_info->section;
	firmware_features_iomem_addr = section_info->section_iomem_addr;

	for (i = 0, num_features_supported = 0;
		i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (pqi_is_firmware_feature_supported(firmware_features,
			pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].supported = true;
			num_features_supported++;
		} else {
			pqi_firmware_feature_update(ctrl_info,
				&pqi_firmware_features[i]);
		}
	}

	if (num_features_supported == 0)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (!pqi_firmware_features[i].supported)
			continue;
		pqi_request_firmware_feature(firmware_features,
			pqi_firmware_features[i].feature_bit);
	}

	rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
		firmware_features_iomem_addr);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable firmware features in PQI configuration table\n");
		for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
			if (!pqi_firmware_features[i].supported)
				continue;
			pqi_firmware_feature_update(ctrl_info,
				&pqi_firmware_features[i]);
		}
		return;
	}

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (!pqi_firmware_features[i].supported)
			continue;
		if (pqi_is_firmware_feature_enabled(firmware_features,
			firmware_features_iomem_addr,
			pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].enabled = true;
		}
		pqi_firmware_feature_update(ctrl_info,
			&pqi_firmware_features[i]);
	}
}

static void pqi_init_firmware_features(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		pqi_firmware_features[i].supported = false;
		pqi_firmware_features[i].enabled = false;
	}
}

static void pqi_process_firmware_features_section(
	struct pqi_config_table_section_info *section_info)
{
	mutex_lock(&pqi_firmware_features_mutex);
	pqi_init_firmware_features();
	pqi_process_firmware_features(section_info);
	mutex_unlock(&pqi_firmware_features_mutex);
}
/*
 * Reset all controller settings that can be initialized during the processing
 * of the PQI Configuration Table.
 */

static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->heartbeat_counter = NULL;
	ctrl_info->soft_reset_status = NULL;
	ctrl_info->soft_reset_handshake_supported = false;
	ctrl_info->enable_r1_writes = false;
	ctrl_info->enable_r5_writes = false;
	ctrl_info->enable_r6_writes = false;
	ctrl_info->raid_iu_timeout_supported = false;
	ctrl_info->tmf_iu_timeout_supported = false;
	ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
	ctrl_info->firmware_triage_supported = false;
	ctrl_info->rpl_extended_format_4_5_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
	u32 table_length;
	u32 section_offset;
	bool firmware_feature_section_present;
	void __iomem *table_iomem_addr;
	struct pqi_config_table *config_table;
	struct pqi_config_table_section_header *section;
	struct pqi_config_table_section_info section_info;
	struct pqi_config_table_section_info feature_section_info;

	table_length = ctrl_info->config_table_length;
	if (table_length == 0)
		return 0;

	config_table = kmalloc(table_length, GFP_KERNEL);
	if (!config_table) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate memory for PQI configuration table\n");
		return -ENOMEM;
	}

	/*
	 * Copy the config table contents from I/O memory space into the
	 * temporary buffer.
	 */
	table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
	memcpy_fromio(config_table, table_iomem_addr, table_length);

	firmware_feature_section_present = false;
	section_info.ctrl_info = ctrl_info;
	section_offset = get_unaligned_le32(&config_table->first_section_offset);

	while (section_offset) {
		section = (void *)config_table + section_offset;

		section_info.section = section;
		section_info.section_offset = section_offset;
		section_info.section_iomem_addr = table_iomem_addr + section_offset;

		switch (get_unaligned_le16(&section->section_id)) {
		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
			firmware_feature_section_present = true;
			feature_section_info = section_info;
			break;
		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
			if (pqi_disable_heartbeat)
				dev_warn(&ctrl_info->pci_dev->dev,
					"heartbeat disabled by module parameter\n");
			else
				ctrl_info->heartbeat_counter =
					table_iomem_addr +
					section_offset +
					offsetof(struct pqi_config_table_heartbeat,
						heartbeat_counter);
			break;
		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
			ctrl_info->soft_reset_status =
				table_iomem_addr +
				section_offset +
				offsetof(struct pqi_config_table_soft_reset,
					soft_reset_status);
			break;
		}

		section_offset = get_unaligned_le16(&section->next_section_offset);
	}

	/*
	 * We process the firmware feature section after all other sections
	 * have been processed so that the feature bit callbacks can take
	 * into account the settings configured by other sections.
	 */
	if (firmware_feature_section_present)
		pqi_process_firmware_features_section(&feature_section_info);

	kfree(config_table);

	return 0;
}
/* Switches the controller from PQI mode back into SIS mode. */

static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
	rc = pqi_reset(ctrl_info);
	if (rc)
		return rc;
	rc = sis_reenable_sis_mode(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"re-enabling SIS mode failed with error %d\n", rc);
		return rc;
	}
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);

	return 0;
}

/*
 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */

static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
		return 0;

	if (sis_is_kernel_up(ctrl_info)) {
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
		return 0;
	}

	return pqi_revert_to_sis_mode(ctrl_info);
}
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u32 product_id;

	if (reset_devices) {
		if (pqi_is_fw_triage_supported(ctrl_info)) {
			rc = sis_wait_for_fw_triage_completion(ctrl_info);
			if (rc)
				return rc;
		}
		sis_soft_reset(ctrl_info);
		msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
	} else {
		rc = pqi_force_sis_mode(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	product_id = sis_get_product_id(ctrl_info);
	ctrl_info->product_id = (u8)product_id;
	ctrl_info->product_revision = (u8)(product_id >> 8);

	if (reset_devices) {
		if (ctrl_info->max_outstanding_requests >
			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
			ctrl_info->max_outstanding_requests =
					PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
	} else {
		if (ctrl_info->max_outstanding_requests >
			PQI_MAX_OUTSTANDING_REQUESTS)
			ctrl_info->max_outstanding_requests =
					PQI_MAX_OUTSTANDING_REQUESTS;
	}

	pqi_calculate_io_resources(ctrl_info);

	rc = pqi_alloc_error_buffer(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate PQI error buffer\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	rc = pqi_alloc_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate admin queues\n");
		return rc;
	}

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_report_device_capability(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"obtaining device capability failed\n");
		return rc;
	}

	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;

	pqi_calculate_queue_resources(ctrl_info);

	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;

	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
		ctrl_info->max_msix_vectors =
			ctrl_info->num_msix_vectors_enabled;
		pqi_calculate_queue_resources(ctrl_info);
	}

	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_alloc_operational_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate operational queues\n");
		return rc;
	}

	pqi_init_operational_queues(ctrl_info);

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
		if (rc) { /* Supported features not returned correctly. */
			dev_err(&ctrl_info->pci_dev->dev,
				"error obtaining advanced RAID bypass configuration\n");
			return rc;
		}
		ctrl_info->ciss_report_log_flags |=
			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
	}

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	/* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_get_ctrl_serial_number(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining ctrl serial number\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_admin_queues *admin_queues;
	struct pqi_event_queue *event_queue;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues->iq_pi_copy = 0;
	admin_queues->oq_ci_copy = 0;
	writel(0, admin_queues->oq_pi);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
		ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
		ctrl_info->queue_groups[i].oq_ci_copy = 0;

		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
		writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
		writel(0, ctrl_info->queue_groups[i].oq_pi);
	}

	event_queue = &ctrl_info->event_queue;
	writel(0, event_queue->oq_pi);
	event_queue->oq_ci_copy = 0;
}
static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	rc = pqi_force_sis_mode(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	pqi_reinit_queues(ctrl_info);

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;
	pqi_ctrl_unblock_requests(ctrl_info);

	pqi_ctrl_reset_config(ctrl_info);

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error obtaining advanced RAID bypass configuration\n");
			return rc;
		}
		ctrl_info->ciss_report_log_flags |=
			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
	}

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	if (pqi_ofa_in_progress(ctrl_info))
		pqi_ctrl_unblock_scan(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
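/*
 * Completion timeout ranges are encoded per the PCIe spec's Device
 * Control 2 register; the 0x6 written at the call site in pqi_pci_init()
 * selects the 65 ms - 210 ms range (see the macro name there).
 */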
static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
{
	int rc;

	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);

	return pcibios_err_to_errno(rc);
}
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable PCI device\n");
		return rc;
	}

	if (sizeof(dma_addr_t) > 4)
		mask = DMA_BIT_MASK(64);
	else
		mask = DMA_BIT_MASK(32);

	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
		goto disable_device;
	}

	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}

	ctrl_info->iomem_base = ioremap(pci_resource_start(
		ctrl_info->pci_dev, 0),
		sizeof(struct pqi_ctrl_registers));
	if (!ctrl_info->iomem_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to map memory for controller registers\n");
		rc = -ENOMEM;
		goto release_regions;
	}

#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS		0x6

	/* Increase the PCIe completion timeout. */
	rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to set PCIe completion timeout\n");
		goto release_regions;
	}

	/* Enable bus mastering. */
	pci_set_master(ctrl_info->pci_dev);

	ctrl_info->registers = ctrl_info->iomem_base;
	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;

	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	if (pci_is_enabled(ctrl_info->pci_dev))
		pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
			GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);
	mutex_init(&ctrl_info->lun_reset_mutex);
	mutex_init(&ctrl_info->ofa_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);

	INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
	INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	init_waitqueue_head(&ctrl_info->block_requests_wait);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->irq_mode = IRQ_MODE_NONE;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
	ctrl_info->max_transfer_encrypted_sas_sata =
		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
	ctrl_info->max_transfer_encrypted_nvme =
		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
	ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
	ctrl_info->max_write_raid_1_10_2drive = ~0;
	ctrl_info->max_write_raid_1_10_3drive = ~0;

	return ctrl_info;
}

static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_irqs(ctrl_info);
	pqi_disable_msix_interrupts(ctrl_info);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_remove_all_scsi_devices(ctrl_info);
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
}

static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);
}
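/*
 * Online Firmware Activation (OFA) host buffer handling: the controller
 * is lent a host memory region, described by an SG list of equally sized
 * DMA-coherent chunks, for its use across the firmware activation.
 */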
static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
{
	int i;
	u32 sg_count;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	dma_addr_t dma_handle;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	sg_count = DIV_ROUND_UP(total_size, chunk_size);
	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
		goto out;

	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
		goto out;

	dev = &ctrl_info->pci_dev->dev;

	for (i = 0; i < sg_count; i++) {
		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			goto out_free_chunks;
		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
out:
	return -ENOMEM;
}
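/*
 * Find a chunk size the system can actually satisfy: start with a single
 * chunk covering the whole request and halve it (page-aligned) until the
 * allocation succeeds or the size would drop below the minimum needed to
 * fit the buffer within PQI_OFA_MAX_SG_DESCRIPTORS descriptors.
 */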
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 chunk_size;
	u32 min_chunk_size;

	if (ctrl_info->ofa_bytes_requested == 0)
		return 0;

	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
	min_chunk_size = PAGE_ALIGN(min_chunk_size);

	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
			return 0;
		chunk_size /= 2;
		chunk_size = PAGE_ALIGN(chunk_size);
	}

	return -ENOMEM;
}
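/*
 * Allocate the OFA memory descriptor structure itself and, if the host
 * buffer behind it can be allocated, stamp it with the OFA signature and
 * version the firmware expects. On failure the descriptor is freed and
 * the cached virtual address cleared.
 */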
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	struct device *dev;
	struct pqi_ofa_memory *ofap;

	dev = &ctrl_info->pci_dev->dev;

	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
	if (!ofap)
		return;

	ctrl_info->pqi_ofa_mem_virt_addr = ofap;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev,
			"failed to allocate host buffer for Online Firmware Activation\n");
		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
		return;
	}

	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}
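/*
 * Free the OFA host buffer chunks (if any bytes were recorded as
 * allocated) using the lengths and addresses saved in the descriptor
 * array, then free the descriptor structure itself.
 */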
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	unsigned int num_memory_descriptors;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
	if (!ofap)
		return;

	dev = &ctrl_info->pci_dev->dev;

	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
		goto out;

	mem_descriptor = ofap->sg_descriptor;
	num_memory_descriptors =
		get_unaligned_le16(&ofap->num_memory_descriptors);

	for (i = 0; i < num_memory_descriptors; i++) {
		dma_free_coherent(dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(dev, sizeof(*ofap), ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}
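/*
 * Tell the controller where the OFA host buffer lives via a synchronous
 * vendor-general request. If no buffer was allocated, the address and
 * length fields stay zero, which informs the firmware that no host
 * memory is available for the update.
 */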
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	u32 buffer_length;
	struct pqi_vendor_general_request request;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (ofap) {
		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(buffer_length,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{
	ssleep(delay_secs);

	return pqi_ctrl_init_resume(ctrl_info);
}
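/*
 * Lockup handling: the action taken when controller firmware stops
 * responding is selected by the pqi_lockup_action module parameter
 * (none, reboot, or panic).
 */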
static void pqi_perform_lockup_action(void)
{
	switch (pqi_lockup_action) {
	case PANIC:
		panic("FATAL: Smart Family Controller lockup detected");
		break;
	case REBOOT:
		emergency_restart();
		break;
	case NONE:
	default:
		break;
	}
}
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};
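/*
 * Walk the entire I/O request pool and complete every in-flight request
 * as failed: SCSI commands get DID_NO_CONNECT, while driver-internal
 * requests get -ENXIO plus the canned RAID error info above indicating a
 * hardware error.
 */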
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			sdev = scmd->device;
			if (!sdev || !scsi_device_online(sdev)) {
				pqi_free_io_request(io_request);
				continue;
			} else {
				set_host_byte(scmd, DID_NO_CONNECT);
			}
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}
static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}
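/*
 * The offline path is split in two: pqi_take_ctrl_offline() runs in the
 * detection context and only marks the controller offline and blocks new
 * requests; the heavier cleanup above is deferred to this work item.
 */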
static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microchip Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
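/*
 * PCI probe entry point: validate the device ID match, pin the device to
 * a usable NUMA node, allocate the per-controller state, and run the PCI
 * and PQI initialization steps. Any failure after allocation funnels
 * through pqi_remove_ctrl() for cleanup.
 */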
static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node, cp_node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		cp_node = cpu_to_node(0);
		if (cp_node == NUMA_NO_NODE)
			cp_node = 0;
		set_dev_node(&pci_dev->dev, cp_node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}
static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
	}
}
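/*
 * Shutdown callback: quiesce all I/O, flush the controller's
 * battery-backed write cache to storage, and reset the controller so no
 * DMA is in flight when the kernel powers off or kexecs.
 */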
static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}
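/*
 * Legacy PM suspend: quiesce and flush the cache with the SUSPEND event,
 * then save PCI state and drop to the requested power state. For a
 * freeze (hibernation image creation) the device is left powered.
 */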
static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_stop_heartbeat_timer(ctrl_info);

	pqi_crash_if_pending_command(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}
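/*
 * Legacy PM resume: if the device never left D0, only the interrupt path
 * needs to be rewired (back to legacy INTx) before unblocking I/O;
 * otherwise restore PCI power state and config space and re-run
 * controller initialization via pqi_ctrl_init_resume().
 */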
static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_ctrl_unblock_device_reset(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		pqi_scsi_unblock_requests(ctrl_info);
		pqi_ctrl_unblock_scan(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	return pqi_ctrl_init_resume(ctrl_info);
}
8916 /* Define the PCI IDs for the controllers that we support. */
8917 static const struct pci_device_id pqi_pci_id_table[] = {
8919 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8923 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8939 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8943 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8947 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8963 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8967 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8971 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8975 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8979 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8983 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8987 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8991 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8995 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8999 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9003 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9007 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9011 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9015 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9019 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9023 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9027 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9031 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9035 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9039 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9043 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9047 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9051 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9055 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9059 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9063 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9067 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9071 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9072 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9075 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9076 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9079 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9080 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9083 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9084 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9087 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9088 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9091 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9092 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9095 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9096 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9099 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9100 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9103 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9104 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9107 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9108 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9111 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9112 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9115 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9116 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9119 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9120 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9123 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9124 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9127 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9128 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9131 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9132 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9135 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9136 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9139 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9140 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9143 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9144 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9147 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9148 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9151 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9152 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9155 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9156 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9159 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9160 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9163 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9164 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9167 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9168 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9171 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9172 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9175 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9176 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9179 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9180 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9183 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9184 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9187 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9188 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9191 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9192 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9195 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9196 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9199 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9200 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9203 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9204 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9207 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9208 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9211 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9212 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9215 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9216 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9219 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9220 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9223 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9224 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9227 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9228 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9231 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9232 PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9235 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9236 PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9239 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9240 PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9243 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9244 PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9247 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9248 PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9251 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9252 PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9255 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9256 PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9259 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9260 PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9263 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9264 PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9267 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9268 PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9271 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9272 PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9275 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9276 PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9279 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9280 PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9283 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9284 PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9287 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9288 PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9291 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9292 PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9295 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9296 PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9299 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9300 PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9303 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9304 PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9307 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9308 PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9311 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9312 PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9315 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9316 PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9319 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9320 PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9323 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9324 PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9327 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9328 PCI_VENDOR_ID_ADVANTECH, 0x8312)
9331 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9332 PCI_VENDOR_ID_DELL, 0x1fe0)
9335 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9336 PCI_VENDOR_ID_HP, 0x0600)
9339 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9340 PCI_VENDOR_ID_HP, 0x0601)
9343 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9344 PCI_VENDOR_ID_HP, 0x0602)
9347 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9348 PCI_VENDOR_ID_HP, 0x0603)
9351 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9352 PCI_VENDOR_ID_HP, 0x0609)
9355 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9356 PCI_VENDOR_ID_HP, 0x0650)
9359 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9360 PCI_VENDOR_ID_HP, 0x0651)
9363 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9364 PCI_VENDOR_ID_HP, 0x0652)
9367 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9368 PCI_VENDOR_ID_HP, 0x0653)
9371 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9372 PCI_VENDOR_ID_HP, 0x0654)
9375 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9376 PCI_VENDOR_ID_HP, 0x0655)
9379 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9380 PCI_VENDOR_ID_HP, 0x0700)
9383 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9384 PCI_VENDOR_ID_HP, 0x0701)
9387 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9388 PCI_VENDOR_ID_HP, 0x1001)
9391 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9392 PCI_VENDOR_ID_HP, 0x1002)
9395 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9396 PCI_VENDOR_ID_HP, 0x1100)
9399 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9400 PCI_VENDOR_ID_HP, 0x1101)
9403 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9407 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9411 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9415 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9419 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9423 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9427 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9431 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9435 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9436 PCI_VENDOR_ID_GIGABYTE, 0x1000)
9439 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9443 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9447 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9451 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9455 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9459 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9463 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
9473 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}
static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}
9512 module_init(pqi_init);
9513 module_exit(pqi_cleanup);
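/*
 * Compile-time layout checks. These BUILD_BUG_ON()s pin the offsets and
 * sizes of every structure shared with controller firmware so that any
 * accidental layout change fails the build; the function is never called
 * and exists only so the checks are compiled.
 */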
9515 static void __attribute__((unused)) verify_structures(void)
9517 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9518 sis_host_to_ctrl_doorbell) != 0x20);
9519 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9520 sis_interrupt_mask) != 0x34);
9521 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9522 sis_ctrl_to_host_doorbell) != 0x9c);
9523 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9524 sis_ctrl_to_host_doorbell_clear) != 0xa0);
9525 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9526 sis_driver_scratch) != 0xb0);
9527 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9528 sis_product_identifier) != 0xb4);
9529 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9530 sis_firmware_status) != 0xbc);
9531 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9532 sis_ctrl_shutdown_reason_code) != 0xcc);
9533 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9534 sis_mailbox) != 0x1000);
9535 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9536 pqi_registers) != 0x4000);
9538 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9540 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9542 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9543 response_queue_id) != 0x4);
9544 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9545 driver_flags) != 0x6);
9546 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
9548 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9550 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9551 service_response) != 0x1);
9552 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9553 data_present) != 0x2);
9554 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9556 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9557 residual_count) != 0x4);
9558 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9559 data_length) != 0x8);
9560 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9562 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9564 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
9566 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9567 data_in_result) != 0x0);
9568 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9569 data_out_result) != 0x1);
9570 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9572 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9574 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9575 status_qualifier) != 0x6);
9576 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9577 sense_data_length) != 0x8);
9578 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9579 response_data_length) != 0xa);
9580 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9581 data_in_transferred) != 0xc);
9582 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9583 data_out_transferred) != 0x10);
9584 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9586 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
9588 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9590 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9591 function_and_status_code) != 0x8);
9592 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9593 max_admin_iq_elements) != 0x10);
9594 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9595 max_admin_oq_elements) != 0x11);
9596 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9597 admin_iq_element_length) != 0x12);
9598 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9599 admin_oq_element_length) != 0x13);
9600 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9601 max_reset_timeout) != 0x14);
9602 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9603 legacy_intx_status) != 0x18);
9604 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9605 legacy_intx_mask_set) != 0x1c);
9606 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9607 legacy_intx_mask_clear) != 0x20);
9608 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9609 device_status) != 0x40);
9610 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9611 admin_iq_pi_offset) != 0x48);
9612 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9613 admin_oq_ci_offset) != 0x50);
9614 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9615 admin_iq_element_array_addr) != 0x58);
9616 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9617 admin_oq_element_array_addr) != 0x60);
9618 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9619 admin_iq_ci_addr) != 0x68);
9620 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9621 admin_oq_pi_addr) != 0x70);
9622 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9623 admin_iq_num_elements) != 0x78);
9624 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9625 admin_oq_num_elements) != 0x79);
9626 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9627 admin_queue_int_msg_num) != 0x7a);
9628 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9629 device_error) != 0x80);
9630 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9631 error_details) != 0x88);
9632 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9633 device_reset) != 0x90);
9634 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9635 power_action) != 0x94);
9636 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
9638 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9639 header.iu_type) != 0);
9640 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9641 header.iu_length) != 2);
9642 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9643 header.driver_flags) != 6);
9644 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9646 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9647 function_code) != 10);
9648 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9649 data.report_device_capability.buffer_length) != 44);
9650 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9651 data.report_device_capability.sg_descriptor) != 48);
9652 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9653 data.create_operational_iq.queue_id) != 12);
9654 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9655 data.create_operational_iq.element_array_addr) != 16);
9656 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9657 data.create_operational_iq.ci_addr) != 24);
9658 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9659 data.create_operational_iq.num_elements) != 32);
9660 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9661 data.create_operational_iq.element_length) != 34);
9662 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9663 data.create_operational_iq.queue_protocol) != 36);
9664 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9665 data.create_operational_oq.queue_id) != 12);
9666 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9667 data.create_operational_oq.element_array_addr) != 16);
9668 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9669 data.create_operational_oq.pi_addr) != 24);
9670 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9671 data.create_operational_oq.num_elements) != 32);
9672 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9673 data.create_operational_oq.element_length) != 34);
9674 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9675 data.create_operational_oq.queue_protocol) != 36);
9676 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9677 data.create_operational_oq.int_msg_num) != 40);
9678 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9679 data.create_operational_oq.coalescing_count) != 42);
9680 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9681 data.create_operational_oq.min_coalescing_time) != 44);
9682 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9683 data.create_operational_oq.max_coalescing_time) != 48);
9684 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9685 data.delete_operational_queue.queue_id) != 12);
9686 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
9687 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9688 data.create_operational_iq) != 64 - 11);
9689 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9690 data.create_operational_oq) != 64 - 11);
9691 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9692 data.delete_operational_queue) != 64 - 11);
9694 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9695 header.iu_type) != 0);
9696 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9697 header.iu_length) != 2);
9698 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9699 header.driver_flags) != 6);
9700 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9702 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9703 function_code) != 10);
9704 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9706 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9707 data.create_operational_iq.status_descriptor) != 12);
9708 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9709 data.create_operational_iq.iq_pi_offset) != 16);
9710 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9711 data.create_operational_oq.status_descriptor) != 12);
9712 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9713 data.create_operational_oq.oq_ci_offset) != 16);
9714 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
9716 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9717 header.iu_type) != 0);
9718 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9719 header.iu_length) != 2);
9720 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9721 header.response_queue_id) != 4);
9722 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9723 header.driver_flags) != 6);
9724 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9726 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9728 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9729 buffer_length) != 12);
9730 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9732 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9733 protocol_specific) != 24);
9734 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9735 error_index) != 27);
9736 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9738 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9740 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9741 sg_descriptors) != 64);
9742 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
9743 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
9745 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9746 header.iu_type) != 0);
9747 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9748 header.iu_length) != 2);
9749 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9750 header.response_queue_id) != 4);
9751 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9752 header.driver_flags) != 6);
9753 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9755 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9757 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9758 buffer_length) != 16);
9759 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9760 data_encryption_key_index) != 22);
9761 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9762 encrypt_tweak_lower) != 24);
9763 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9764 encrypt_tweak_upper) != 28);
9765 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9767 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9768 error_index) != 48);
9769 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9770 num_sg_descriptors) != 50);
9771 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9773 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9775 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9776 sg_descriptors) != 64);
9777 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
9778 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
9780 BUILD_BUG_ON(offsetof(struct pqi_io_response,
9781 header.iu_type) != 0);
9782 BUILD_BUG_ON(offsetof(struct pqi_io_response,
9783 header.iu_length) != 2);
9784 BUILD_BUG_ON(offsetof(struct pqi_io_response,
9786 BUILD_BUG_ON(offsetof(struct pqi_io_response,
9787 error_index) != 10);
9789 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9790 header.iu_type) != 0);
9791 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9792 header.iu_length) != 2);
9793 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9794 header.response_queue_id) != 4);
9795 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9797 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9798 data.report_event_configuration.buffer_length) != 12);
9799 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9800 data.report_event_configuration.sg_descriptors) != 16);
9801 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9802 data.set_event_configuration.global_event_oq_id) != 10);
9803 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9804 data.set_event_configuration.buffer_length) != 12);
9805 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9806 data.set_event_configuration.sg_descriptors) != 16);
9808 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
9809 max_inbound_iu_length) != 6);
9810 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
9811 max_outbound_iu_length) != 14);
9812 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
9814 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9816 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9817 iq_arbitration_priority_support_bitmask) != 8);
9818 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9819 maximum_aw_a) != 9);
9820 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9821 maximum_aw_b) != 10);
9822 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9823 maximum_aw_c) != 11);
9824 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9825 max_inbound_queues) != 16);
9826 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9827 max_elements_per_iq) != 18);
9828 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9829 max_iq_element_length) != 24);
9830 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9831 min_iq_element_length) != 26);
9832 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9833 max_outbound_queues) != 30);
9834 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9835 max_elements_per_oq) != 32);
9836 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9837 intr_coalescing_time_granularity) != 34);
9838 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9839 max_oq_element_length) != 36);
9840 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9841 min_oq_element_length) != 38);
9842 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9843 iu_layer_descriptors) != 64);
9844 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
9846 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
9848 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
9850 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
9852 BUILD_BUG_ON(offsetof(struct pqi_event_config,
9853 num_event_descriptors) != 2);
9854 BUILD_BUG_ON(offsetof(struct pqi_event_config,
9857 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
9858 ARRAY_SIZE(pqi_supported_event_types));
9860 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9861 header.iu_type) != 0);
9862 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9863 header.iu_length) != 2);
9864 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9866 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9868 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9869 additional_event_id) != 12);
9870 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9872 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
9874 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9875 header.iu_type) != 0);
9876 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9877 header.iu_length) != 2);
9878 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9880 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9882 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9883 additional_event_id) != 12);
9884 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
9886 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9887 header.iu_type) != 0);
9888 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9889 header.iu_length) != 2);
9890 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9892 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9894 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9896 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9898 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9899 protocol_specific) != 24);
9900 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9901 outbound_queue_id_to_manage) != 26);
9902 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9903 request_id_to_manage) != 28);
9904 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9905 task_management_function) != 30);
9906 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
9908 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9909 header.iu_type) != 0);
9910 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9911 header.iu_length) != 2);
9912 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9914 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9916 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9917 additional_response_info) != 12);
9918 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9919 response_code) != 15);
9920 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
9922 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9923 configured_logical_drive_count) != 0);
9924 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9925 configuration_signature) != 1);
9926 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9927 firmware_version_short) != 5);
9928 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9929 extended_logical_unit_count) != 154);
9930 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9931 firmware_build_number) != 190);
9932 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9934 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9935 product_id) != 208);
9936 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9937 extra_controller_flags) != 286);
9938 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9939 controller_mode) != 292);
9940 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9941 spare_part_number) != 293);
9942 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9943 firmware_version_long) != 325);
9945 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9946 phys_bay_in_box) != 115);
9947 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9948 device_type) != 120);
9949 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9950 redundant_path_present_map) != 1736);
9951 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9952 active_path_number) != 1738);
9953 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9954 alternate_paths_phys_connector) != 1739);
9955 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9956 alternate_paths_phys_box_on_port) != 1755);
9957 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9958 current_queue_depth_limit) != 1796);
9959 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
9961 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
9962 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
9964 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
9965 subpage_code) != 1);
9966 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
9967 buffer_length) != 2);
9969 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
9970 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
9972 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
9973 subpage_code) != 1);
9974 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
9977 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
9979 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9981 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9982 firmware_read_support) != 4);
9983 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9984 driver_read_support) != 5);
9985 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9986 firmware_write_support) != 6);
9987 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9988 driver_write_support) != 7);
9989 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9990 max_transfer_encrypted_sas_sata) != 8);
9991 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9992 max_transfer_encrypted_nvme) != 10);
9993 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9994 max_write_raid_5_6) != 12);
9995 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9996 max_write_raid_1_10_2drive) != 14);
9997 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9998 max_write_raid_1_10_3drive) != 16);
10000 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
10001 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10002 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10003 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10004 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10005 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10006 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10007 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10008 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10009 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10010 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10011 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}