// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.8-045"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		8
#define DRIVER_REVISION		45

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_msecs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10
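
/*
 * Wait for the number of threads actively submitting requests to drop
 * to the number of threads parked in pqi_wait_if_ctrl_blocked(),
 * warning every PQI_QUIESCE_WARNING_TIMEOUT_SECS until driver activity
 * has quiesced.
 */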
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}
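
/*
 * Map a single contiguous data buffer for DMA and describe it in one
 * PQI SG descriptor flagged CISS_SG_LAST; pqi_pci_unmap() below undoes
 * the mapping(s).
 */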
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
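
/*
 * Build a RAID-path request for a CISS/BMIC command: fill in the IU
 * header, LUN, and buffer length, construct a per-command CDB, derive
 * the DMA direction from the SOP data direction, and map the data
 * buffer into the request's first SG descriptor.
 */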
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = ctrl_info->ciss_report_log_flags;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}
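
/*
 * Claim a slot in the pre-allocated io_request pool. The search starts
 * at next_io_request_slot (benignly racy) and each slot's atomic
 * refcount acts as the lock: the first caller to raise a refcount from
 * 0 to 1 owns that slot.
 */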
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;	/* Convert from kibibytes to bytes. */

	return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
		sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
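
/*
 * Query the BMIC sense-feature I/O page's AIO subpage and cache the
 * controller's encrypted-transfer and RAID write-bypass limits; a
 * short or inconsistent response leaves the limits untouched.
 */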
static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA		(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()
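
/*
 * Report the host's current time to the controller as a BCD-encoded
 * "TD" host-wellness field: hour/minute/second, then month, day,
 * century, and year.
 */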
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}
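
/*
 * Issue a CISS report LUNs command twice: first with a header-sized
 * buffer to learn the list length, then with a buffer sized for the
 * whole list, retrying if the list grew in between.
 */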
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
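
/*
 * Derive the bus/target/lun address from the device's 8-byte CISS LUN:
 * the controller itself lands on PQI_HBA_BUS, logical volumes are
 * decoded from the LUN ID, and bare physical devices are deferred to
 * the SAS transport layer.
 */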
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
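
/*
 * Sanity-check a RAID map returned by the firmware: it must be at
 * least header-sized, the layout map count must match the RAID level
 * (2 for RAID-1, 3 for RAID-1 Triple), and multi-map RAID-5/6 must
 * have a non-zero row size.
 */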
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {
		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
		sizeof(device->page_83_identifier));

	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
		id_phys->phy_count)
		device->phy_id =
			id_phys->phy_to_phy_map[device->active_path_index];
	else
		device->phy_id = 0xFF;

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}
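
/*
 * Two devices are considered equal only if both are physical or both
 * are logical; physical devices are compared by WWID, logical devices
 * by volume ID.
 */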
static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP    ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
		existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
		new_device->volume_status == CISS_LV_OK)
		existing_device->rescan = true;

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->phy_id = new_device->phy_id;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->next_bypass_group = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}
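
/*
 * Merge a freshly discovered device list into the driver's internal
 * list and propagate the result to the SCSI midlayer: remove devices
 * that disappeared, update ones that changed, and expose new ones.
 */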
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock. That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del_init(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/*
	 * If OFA is in progress and there are devices that need to be deleted,
	 * allow any pending reset operations to continue and unblock any SCSI
	 * requests before removal.
	 */
	if (pqi_ofa_in_progress(ctrl_info)) {
		list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
			if (pqi_is_device_added(device))
				pqi_device_remove_start(device);
		pqi_ctrl_unblock_device_reset(ctrl_info);
		pqi_scsi_unblock_requests(ctrl_info);
	}

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		}
		list_del(&device->delete_list_entry);
		if (pqi_is_device_added(device)) {
			pqi_remove_device(ctrl_info, device);
		} else {
			if (!device->volume_offline)
				pqi_dev_info(ctrl_info, "removed", device);
			pqi_free_device(device);
		}
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
			if (device->rescan) {
				scsi_rescan_device(&device->sdev->sdev_gendev);
				device->rescan = false;
			}
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc == 0) {
				pqi_dev_info(ctrl_info, "added", device);
			} else {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}

static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	/*
	 * Only support the HBA controller itself as a RAID
	 * controller. If it's a RAID controller other than
	 * the HBA itself (an external RAID controller, for
	 * example), we don't support it.
	 */
	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
		!pqi_is_hba_lunid(device->scsi3addr))
		return false;

	return true;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}

static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	switch (device->device_type) {
	case SA_DEVICE_TYPE_SAS:
	case SA_DEVICE_TYPE_EXPANDER_SMP:
	case SA_DEVICE_TYPE_SES:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
}

static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry)
{
	if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
		pqi_is_device_with_sas_address(device))
		device->wwid = phys_lun_ext_entry->wwid;
	else
		device->wwid = cpu_to_be64(get_unaligned_be64(&device->page_83_identifier));
}
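
/*
 * Rebuild the device list from fresh RPL data: fetch the physical and
 * logical LUN lists, allocate a pqi_scsi_dev per LUN, interrogate each
 * device, then hand the valid ones to pqi_update_device_list().
 */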
2127 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2131 LIST_HEAD(new_device_list_head);
2132 struct report_phys_lun_extended *physdev_list = NULL;
2133 struct report_log_lun_extended *logdev_list = NULL;
2134 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
2135 struct report_log_lun_extended_entry *log_lun_ext_entry;
2136 struct bmic_identify_physical_device *id_phys = NULL;
2139 struct pqi_scsi_dev **new_device_list = NULL;
2140 struct pqi_scsi_dev *device;
2141 struct pqi_scsi_dev *next;
2142 unsigned int num_new_devices;
2143 unsigned int num_valid_devices;
2144 bool is_physical_device;
2146 unsigned int physical_index;
2147 unsigned int logical_index;
2148 static char *out_of_memory_msg =
2149 "failed to allocate memory, device discovery stopped";
2151 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2157 get_unaligned_be32(&physdev_list->header.list_length)
2158 / sizeof(physdev_list->lun_entries[0]);
2164 get_unaligned_be32(&logdev_list->header.list_length)
2165 / sizeof(logdev_list->lun_entries[0]);
2169 if (num_physicals) {
2171 * We need this buffer for calls to pqi_get_physical_disk_info()
2172 * below. We allocate it here instead of inside
2173 * pqi_get_physical_disk_info() because it's a fairly large
2176 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2178 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2184 if (pqi_hide_vsep) {
2185 for (i = num_physicals - 1; i >= 0; i--) {
2186 phys_lun_ext_entry =
2187 &physdev_list->lun_entries[i];
2188 if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) {
2189 pqi_mask_device(phys_lun_ext_entry->lunid);
2197 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2198 ctrl_info->lv_drive_type_mix_valid = true;
2200 num_new_devices = num_physicals + num_logicals;
2202	new_device_list = kmalloc_array(num_new_devices,
2203		sizeof(*new_device_list),
2204		GFP_KERNEL);
2205	if (!new_device_list) {
2206		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2207		rc = -ENOMEM;
2208		goto out;
2209	}
2211	for (i = 0; i < num_new_devices; i++) {
2212		device = kzalloc(sizeof(*device), GFP_KERNEL);
2213		if (!device) {
2214			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2215				out_of_memory_msg);
2216			rc = -ENOMEM;
2217			goto out;
2218		}
2219		list_add_tail(&device->new_device_list_entry,
2220			&new_device_list_head);
2221	}
2223	device = NULL;
2224	num_valid_devices = 0;
2225	physical_index = 0;
2226	logical_index = 0;
2228	for (i = 0; i < num_new_devices; i++) {
2230		if ((!pqi_expose_ld_first && i < num_physicals) ||
2231			(pqi_expose_ld_first && i >= num_logicals)) {
2232			is_physical_device = true;
2233			phys_lun_ext_entry =
2234				&physdev_list->lun_entries[physical_index++];
2235			log_lun_ext_entry = NULL;
2236			scsi3addr = phys_lun_ext_entry->lunid;
2237		} else {
2238			is_physical_device = false;
2239			phys_lun_ext_entry = NULL;
2240			log_lun_ext_entry =
2241				&logdev_list->lun_entries[logical_index++];
2242			scsi3addr = log_lun_ext_entry->lunid;
2243		}
2245		if (is_physical_device && pqi_skip_device(scsi3addr))
2246			continue;
2248		if (device)
2249			device = list_next_entry(device, new_device_list_entry);
2250		else
2251			device = list_first_entry(&new_device_list_head,
2252				struct pqi_scsi_dev, new_device_list_entry);
2254 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2255 device->is_physical_device = is_physical_device;
2256 if (is_physical_device) {
2257 device->device_type = phys_lun_ext_entry->device_type;
2258 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2259				device->is_expander_smp_device = true;
2260		} else {
2261			device->is_external_raid_device =
2262				pqi_is_external_raid_addr(scsi3addr);
2263		}
2265		if (!pqi_is_supported_device(device))
2266			continue;
2268 /* Gather information about the device. */
2269 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2270		if (rc == -ENOMEM) {
2271			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2272				out_of_memory_msg);
2273			goto out;
2274		}
2275		if (rc) {
2276			if (device->is_physical_device)
2277				dev_warn(&ctrl_info->pci_dev->dev,
2278					"obtaining device info failed, skipping physical device %016llx\n",
2279					get_unaligned_be64(&phys_lun_ext_entry->wwid));
2280			else
2281				dev_warn(&ctrl_info->pci_dev->dev,
2282					"obtaining device info failed, skipping logical device %08x%08x\n",
2283					*((u32 *)&device->scsi3addr),
2284					*((u32 *)&device->scsi3addr[4]));
2285			rc = 0;
2286			continue;
2287		}
2289 pqi_assign_bus_target_lun(device);
2291 if (device->is_physical_device) {
2292 pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry);
2293 if ((phys_lun_ext_entry->device_flags &
2294 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2295 phys_lun_ext_entry->aio_handle) {
2296				device->aio_enabled = true;
2297				device->aio_handle =
2298					phys_lun_ext_entry->aio_handle;
2299			}
2300		} else {
2301			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2302				sizeof(device->volume_id));
2303		}
2305		if (pqi_is_device_with_sas_address(device))
2306			device->sas_address = get_unaligned_be64(&device->wwid);
2308		new_device_list[num_valid_devices++] = device;
2309	}
2311	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2313 out:
2314	list_for_each_entry_safe(device, next, &new_device_list_head,
2315		new_device_list_entry) {
2316		if (device->keep_device)
2317			continue;
2318		list_del(&device->new_device_list_entry);
2319		pqi_free_device(device);
2320	}
2322	kfree(new_device_list);
2323	kfree(physdev_list);
2324	kfree(logdev_list);
2325	kfree(id_phys);
2327	return rc;
2328 }
2330 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2331 {
2332	int rc;
2333	int mutex_acquired;
2335	if (pqi_ctrl_offline(ctrl_info))
2336		return -ENXIO;
2338	mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2340	if (!mutex_acquired) {
2341		if (pqi_ctrl_scan_blocked(ctrl_info))
2342			return -EBUSY;
2343		pqi_schedule_rescan_worker_delayed(ctrl_info);
2344		return -EINPROGRESS;
2345	}
2347	rc = pqi_update_scsi_devices(ctrl_info);
2348	if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2349		pqi_schedule_rescan_worker_delayed(ctrl_info);
2351	mutex_unlock(&ctrl_info->scan_mutex);
2353	return rc;
2354 }
2356 static void pqi_scan_start(struct Scsi_Host *shost)
2357 {
2358	struct pqi_ctrl_info *ctrl_info;
2360	ctrl_info = shost_to_hba(shost);
2362	pqi_scan_scsi_devices(ctrl_info);
2363 }
2365 /* Returns TRUE if scan is finished. */
2367 static int pqi_scan_finished(struct Scsi_Host *shost,
2368	unsigned long elapsed_time)
2369 {
2370	struct pqi_ctrl_info *ctrl_info;
2372	ctrl_info = shost_priv(shost);
2374	return !mutex_is_locked(&ctrl_info->scan_mutex);
2375 }
2377 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2378	struct raid_map *raid_map, u64 first_block)
2379 {
2380	u32 volume_blk_size;
2382	/*
2383	 * Set the encryption tweak values based on logical block address.
2384	 * If the block size is 512, the tweak value is equal to the LBA.
2385	 * For other block sizes, tweak value is (LBA * block size) / 512.
2386	 */
2387	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2388	if (volume_blk_size != 512)
2389		first_block = (first_block * volume_blk_size) / 512;
2391	encryption_info->data_encryption_key_index =
2392		get_unaligned_le16(&raid_map->data_encryption_key_index);
2393	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2394	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2395 }
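/*
 * Worked example of the tweak math above (illustrative values): with
 * volume_blk_size = 4096 and first_block = 100, the tweak becomes
 * (100 * 4096) / 512 = 800, i.e. the equivalent 512-byte LBA, giving
 * encrypt_tweak_lower = 800 and encrypt_tweak_upper = 0.
 */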
2397 /*
2398  * Attempt to perform RAID bypass mapping for a logical volume I/O.
2399  */
2401 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2402	struct pqi_scsi_dev_raid_map_data *rmd)
2403 {
2404	bool is_supported = true;
2406	switch (rmd->raid_level) {
2407	case SA_RAID_0:
2408		break;
2409	case SA_RAID_1:
2410		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2411			rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2412			is_supported = false;
2413		break;
2414	case SA_RAID_TRIPLE:
2415		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2416			rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2417			is_supported = false;
2418		break;
2419	case SA_RAID_5:
2420		if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2421			rmd->data_length > ctrl_info->max_write_raid_5_6))
2422			is_supported = false;
2423		break;
2424	case SA_RAID_6:
2425		if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2426			rmd->data_length > ctrl_info->max_write_raid_5_6))
2427			is_supported = false;
2428		break;
2429	default:
2430		is_supported = false;
2431		break;
2432	}
2434	return is_supported;
2435 }
2437 #define PQI_RAID_BYPASS_INELIGIBLE 1
2439 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2440	struct pqi_scsi_dev_raid_map_data *rmd)
2441 {
2442	/* Check for valid opcode, get LBA and block count. */
2443	switch (scmd->cmnd[0]) {
2444	case WRITE_6:
2445		rmd->is_write = true;
2446		fallthrough;
2447	case READ_6:
2448		rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2449			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2450		rmd->block_cnt = (u32)scmd->cmnd[4];
2451		if (rmd->block_cnt == 0)
2452			rmd->block_cnt = 256;
2453		break;
2454	case WRITE_10:
2455		rmd->is_write = true;
2456		fallthrough;
2457	case READ_10:
2458		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2459		rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2460		break;
2461	case WRITE_12:
2462		rmd->is_write = true;
2463		fallthrough;
2464	case READ_12:
2465		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2466		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2467		break;
2468	case WRITE_16:
2469		rmd->is_write = true;
2470		fallthrough;
2471	case READ_16:
2472		rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2473		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2474		break;
2475	default:
2476		/* Process via normal I/O path. */
2477		return PQI_RAID_BYPASS_INELIGIBLE;
2478	}
2480	put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2482	return 0;
2483 }
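/*
 * Worked example (illustrative): for the READ(10) CDB
 * 28 00 00 00 10 00 00 00 08 00, bytes 2-5 decode to first_block = 0x1000
 * (4096) and bytes 7-8 to block_cnt = 8; data_length is then taken from
 * scsi_bufflen(), not from the CDB itself.
 */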
2485 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2486	struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2487 {
2488 #if BITS_PER_LONG == 32
2489	u64 tmpdiv;
2490 #endif
2492	rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2494 /* Check for invalid block or wraparound. */
2495 if (rmd->last_block >=
2496 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2497 rmd->last_block < rmd->first_block)
2498 return PQI_RAID_BYPASS_INELIGIBLE;
2500 rmd->data_disks_per_row =
2501 get_unaligned_le16(&raid_map->data_disks_per_row);
2502 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2503 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2505 /* Calculate stripe information for the request. */
2506 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2507 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2508 return PQI_RAID_BYPASS_INELIGIBLE;
2509 #if BITS_PER_LONG == 32
2510 tmpdiv = rmd->first_block;
2511 do_div(tmpdiv, rmd->blocks_per_row);
2512 rmd->first_row = tmpdiv;
2513 tmpdiv = rmd->last_block;
2514 do_div(tmpdiv, rmd->blocks_per_row);
2515 rmd->last_row = tmpdiv;
2516 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2517 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2518 tmpdiv = rmd->first_row_offset;
2519 do_div(tmpdiv, rmd->strip_size);
2520 rmd->first_column = tmpdiv;
2521 tmpdiv = rmd->last_row_offset;
2522 do_div(tmpdiv, rmd->strip_size);
2523	rmd->last_column = tmpdiv;
2524 #else
2525 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2526 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2527 rmd->first_row_offset = (u32)(rmd->first_block -
2528 (rmd->first_row * rmd->blocks_per_row));
2529 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2530 rmd->blocks_per_row));
2531 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2532	rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2533 #endif
2535 /* If this isn't a single row/column then give to the controller. */
2536 if (rmd->first_row != rmd->last_row ||
2537 rmd->first_column != rmd->last_column)
2538 return PQI_RAID_BYPASS_INELIGIBLE;
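/*
 * Worked example of the stripe math above (illustrative values): with
 * strip_size = 128 blocks and data_disks_per_row = 3, blocks_per_row = 384.
 * A request starting at block 500 yields first_row = 500 / 384 = 1,
 * first_row_offset = 500 - 384 = 116, and first_column = 116 / 128 = 0; a
 * request that crosses a row or column boundary is handed back to the
 * controller firmware instead of being bypassed.
 */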
2540 /* Proceeding with driver mapping. */
2541 rmd->total_disks_per_row = rmd->data_disks_per_row +
2542 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2543	rmd->map_row = ((u32)(rmd->first_row >>
2544		raid_map->parity_rotation_shift)) %
2545		get_unaligned_le16(&raid_map->row_cnt);
2546	rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2547		rmd->first_column;
2549	return 0;
2550 }
2552 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2553	struct raid_map *raid_map)
2554 {
2555 #if BITS_PER_LONG == 32
2556	u64 tmpdiv;
2557 #endif
2559	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2560		return PQI_RAID_BYPASS_INELIGIBLE;
2563 /* Verify first and last block are in same RAID group. */
2564 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2565 #if BITS_PER_LONG == 32
2566 tmpdiv = rmd->first_block;
2567 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2568 tmpdiv = rmd->first_group;
2569 do_div(tmpdiv, rmd->blocks_per_row);
2570 rmd->first_group = tmpdiv;
2571 tmpdiv = rmd->last_block;
2572 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2573 tmpdiv = rmd->last_group;
2574 do_div(tmpdiv, rmd->blocks_per_row);
2575	rmd->last_group = tmpdiv;
2576 #else
2577	rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2578	rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2579 #endif
2580 if (rmd->first_group != rmd->last_group)
2581 return PQI_RAID_BYPASS_INELIGIBLE;
2583 /* Verify request is in a single row of RAID 5/6. */
2584 #if BITS_PER_LONG == 32
2585 tmpdiv = rmd->first_block;
2586 do_div(tmpdiv, rmd->stripesize);
2587 rmd->first_row = tmpdiv;
2588 rmd->r5or6_first_row = tmpdiv;
2589 tmpdiv = rmd->last_block;
2590 do_div(tmpdiv, rmd->stripesize);
2591	rmd->r5or6_last_row = tmpdiv;
2592 #else
2593	rmd->first_row = rmd->r5or6_first_row =
2594		rmd->first_block / rmd->stripesize;
2595	rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2596 #endif
2597 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2598 return PQI_RAID_BYPASS_INELIGIBLE;
2600 /* Verify request is in a single column. */
2601 #if BITS_PER_LONG == 32
2602 tmpdiv = rmd->first_block;
2603 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2604 tmpdiv = rmd->first_row_offset;
2605 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2606 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2607 tmpdiv = rmd->last_block;
2608 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2609 tmpdiv = rmd->r5or6_last_row_offset;
2610 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2611 tmpdiv = rmd->r5or6_first_row_offset;
2612 do_div(tmpdiv, rmd->strip_size);
2613 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2614 tmpdiv = rmd->r5or6_last_row_offset;
2615 do_div(tmpdiv, rmd->strip_size);
2616	rmd->r5or6_last_column = tmpdiv;
2617 #else
2618	rmd->first_row_offset = rmd->r5or6_first_row_offset =
2619		(u32)((rmd->first_block % rmd->stripesize) %
2620		rmd->blocks_per_row);
2622	rmd->r5or6_last_row_offset =
2623		(u32)((rmd->last_block % rmd->stripesize) %
2624		rmd->blocks_per_row);
2626	rmd->first_column =
2627		rmd->r5or6_first_row_offset / rmd->strip_size;
2628	rmd->r5or6_first_column = rmd->first_column;
2629	rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2630 #endif
2631 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2632 return PQI_RAID_BYPASS_INELIGIBLE;
2634	/* Request is eligible. */
2635	rmd->map_row =
2636		((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2637 get_unaligned_le16(&raid_map->row_cnt);
2639 rmd->map_index = (rmd->first_group *
2640 (get_unaligned_le16(&raid_map->row_cnt) *
2641 rmd->total_disks_per_row)) +
2642 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2644	if (rmd->is_write) {
2645		u32 index;
2647		/*
2648		 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2649		 * parity entries inside the device's raid_map.
2650		 *
2651		 * A device's RAID map is bounded by: number of RAID disks squared.
2652		 *
2653		 * The device's RAID map size is checked during device
2654		 * initialization.
2655		 */
2656		index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2657		index *= rmd->total_disks_per_row;
2658		index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2660		rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2661		if (rmd->raid_level == SA_RAID_6) {
2662			rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2663			rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2664		}
2665 #if BITS_PER_LONG == 32
2666		tmpdiv = rmd->first_block;
2667		do_div(tmpdiv, rmd->blocks_per_row);
2668		rmd->row = tmpdiv;
2669 #else
2670		rmd->row = rmd->first_block / rmd->blocks_per_row;
2671 #endif
2672	}
2674	return 0;
2675 }
2677 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2678 {
2679	/* Build the new CDB for the physical disk I/O. */
2680	if (rmd->disk_block > 0xffffffff) {
2681		rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2682		rmd->cdb[1] = 0;
2683		put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2684		put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2685		rmd->cdb[14] = 0;
2686		rmd->cdb[15] = 0;
2687		rmd->cdb_length = 16;
2688	} else {
2689		rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2690		rmd->cdb[1] = 0;
2691		put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2692		rmd->cdb[6] = 0;
2693		put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2694		rmd->cdb[9] = 0;
2695		rmd->cdb_length = 10;
2696	}
2697 }
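/*
 * Example of the CDB selection above: disk_block = 0x100000000 no longer
 * fits in 32 bits, so a 16-byte READ(16)/WRITE(16) CDB is built; for
 * disk_block = 4096 a 10-byte CDB suffices because both the LBA and the
 * (at most 0xffff) block count fit its narrower fields.
 */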
2699 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2700	struct pqi_scsi_dev_raid_map_data *rmd)
2701 {
2702	u32 index;
2703	u32 group;
2705	group = rmd->map_index / rmd->data_disks_per_row;
2707	index = rmd->map_index - (group * rmd->data_disks_per_row);
2708	rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2709	index += rmd->data_disks_per_row;
2710	rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2711	if (rmd->layout_map_count > 2) {
2712		index += rmd->data_disks_per_row;
2713		rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2714	}
2716	rmd->num_it_nexus_entries = rmd->layout_map_count;
2717 }
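/*
 * Illustrative example: for a two-drive RAID-1 map with
 * data_disks_per_row = 1 and map_index = 0, it_nexus[0] and it_nexus[1]
 * come from disk_data[0] and disk_data[1]; a bypassed write must reach
 * every mirror, hence num_it_nexus_entries = layout_map_count.
 */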
2719 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2720	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2721	struct pqi_queue_group *queue_group)
2722 {
2723	int rc;
2724	struct raid_map *raid_map;
2725	u32 group;
2726	u32 next_bypass_group;
2727	struct pqi_encryption_info *encryption_info_ptr;
2728	struct pqi_encryption_info encryption_info;
2729	struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2731	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2732	if (rc)
2733		return PQI_RAID_BYPASS_INELIGIBLE;
2735 rmd.raid_level = device->raid_level;
2737 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2738 return PQI_RAID_BYPASS_INELIGIBLE;
2740 if (unlikely(rmd.block_cnt == 0))
2741 return PQI_RAID_BYPASS_INELIGIBLE;
2743	raid_map = device->raid_map;
2745	rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2746	if (rc)
2747		return PQI_RAID_BYPASS_INELIGIBLE;
2749	if (device->raid_level == SA_RAID_1 ||
2750		device->raid_level == SA_RAID_TRIPLE) {
2751		if (rmd.is_write) {
2752			pqi_calc_aio_r1_nexus(raid_map, &rmd);
2753		} else {
2754			group = device->next_bypass_group;
2755			next_bypass_group = group + 1;
2756			if (next_bypass_group >= rmd.layout_map_count)
2757				next_bypass_group = 0;
2758			device->next_bypass_group = next_bypass_group;
2759			rmd.map_index += group * rmd.data_disks_per_row;
2760		}
2761	} else if ((device->raid_level == SA_RAID_5 ||
2762		device->raid_level == SA_RAID_6) &&
2763		(rmd.layout_map_count > 1 || rmd.is_write)) {
2764		rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2765		if (rc)
2766			return PQI_RAID_BYPASS_INELIGIBLE;
2767	}
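/*
 * Example of the round-robin above (illustrative): a two-way RAID-1 volume
 * has layout_map_count = 2, so successive bypassed reads alternate
 * next_bypass_group between 0 and 1, spreading reads across both mirrors.
 */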
2769 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2770 return PQI_RAID_BYPASS_INELIGIBLE;
2772 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2773 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2774 rmd.first_row * rmd.strip_size +
2775 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2776 rmd.disk_block_cnt = rmd.block_cnt;
2778 /* Handle differing logical/physical block sizes. */
2779	if (raid_map->phys_blk_shift) {
2780		rmd.disk_block <<= raid_map->phys_blk_shift;
2781		rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2782	}
2784 if (unlikely(rmd.disk_block_cnt > 0xffff))
2785 return PQI_RAID_BYPASS_INELIGIBLE;
2787 pqi_set_aio_cdb(&rmd);
2789 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2790 if (rmd.data_length > device->max_transfer_encrypted)
2791 return PQI_RAID_BYPASS_INELIGIBLE;
2792		pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2793		encryption_info_ptr = &encryption_info;
2794	} else {
2795		encryption_info_ptr = NULL;
2796	}
2798	if (rmd.is_write) {
2799		switch (device->raid_level) {
2800		case SA_RAID_1:
2801		case SA_RAID_TRIPLE:
2802			return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
2803				encryption_info_ptr, device, &rmd);
2804		case SA_RAID_5:
2805		case SA_RAID_6:
2806			return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
2807				encryption_info_ptr, device, &rmd);
2808		}
2809	}
2811	return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
2812		rmd.cdb, rmd.cdb_length, queue_group,
2813		encryption_info_ptr, true);
2814 }
2816 #define PQI_STATUS_IDLE 0x0
2818 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2819 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2821 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2822 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2823 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2824 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2825 #define PQI_DEVICE_STATE_ERROR 0x4
2827 #define PQI_MODE_READY_TIMEOUT_SECS 30
2828 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2830 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2831 {
2832	struct pqi_device_registers __iomem *pqi_registers;
2833	unsigned long timeout;
2834	u64 signature;
2835	u8 status;
2837	pqi_registers = ctrl_info->pqi_registers;
2838	timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2840	while (1) {
2841		signature = readq(&pqi_registers->signature);
2842		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2843			sizeof(signature)) == 0)
2844			break;
2845		if (time_after(jiffies, timeout)) {
2846			dev_err(&ctrl_info->pci_dev->dev,
2847				"timed out waiting for PQI signature\n");
2848			return -ETIMEDOUT;
2849		}
2850		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2851	}
2853	while (1) {
2854		status = readb(&pqi_registers->function_and_status_code);
2855		if (status == PQI_STATUS_IDLE)
2856			break;
2857		if (time_after(jiffies, timeout)) {
2858			dev_err(&ctrl_info->pci_dev->dev,
2859				"timed out waiting for PQI IDLE\n");
2860			return -ETIMEDOUT;
2861		}
2862		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2863	}
2865	while (1) {
2866		if (readl(&pqi_registers->device_status) ==
2867			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2868			break;
2869		if (time_after(jiffies, timeout)) {
2870			dev_err(&ctrl_info->pci_dev->dev,
2871				"timed out waiting for PQI all registers ready\n");
2872			return -ETIMEDOUT;
2873		}
2874		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2875	}
2877	return 0;
2878 }
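/*
 * Note on the polling above: the 30-second deadline is computed once, so
 * the three phases (signature, IDLE status, all-registers-ready) share a
 * single timeout budget rather than receiving 30 seconds each.
 */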
2880 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2881 {
2882	struct pqi_scsi_dev *device;
2884	device = io_request->scmd->device->hostdata;
2885	device->raid_bypass_enabled = false;
2886	device->aio_enabled = false;
2887 }
2889 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2890 {
2891	struct pqi_ctrl_info *ctrl_info;
2892	struct pqi_scsi_dev *device;
2894	device = sdev->hostdata;
2895	if (device->device_offline)
2896		return;
2898	device->device_offline = true;
2899	ctrl_info = shost_to_hba(sdev->host);
2900	pqi_schedule_rescan_worker(ctrl_info);
2901	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2902		path, ctrl_info->scsi_host->host_no, device->bus,
2903		device->target, device->lun);
2904 }
2906 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2907 {
2908	u8 scsi_status;
2909	u8 host_byte;
2910	struct scsi_cmnd *scmd;
2911	struct pqi_raid_error_info *error_info;
2912	size_t sense_data_length;
2913	int residual_count;
2914	int xfer_count;
2915	struct scsi_sense_hdr sshdr;
2917	scmd = io_request->scmd;
2918	if (!scmd)
2919		return;
2921	error_info = io_request->error_info;
2922	scsi_status = error_info->status;
2923	host_byte = DID_OK;
2925	switch (error_info->data_out_result) {
2926	case PQI_DATA_IN_OUT_GOOD:
2927		break;
2928	case PQI_DATA_IN_OUT_UNDERFLOW:
2929		xfer_count =
2930			get_unaligned_le32(&error_info->data_out_transferred);
2931		residual_count = scsi_bufflen(scmd) - xfer_count;
2932		scsi_set_resid(scmd, residual_count);
2933		if (xfer_count < scmd->underflow)
2934			host_byte = DID_SOFT_ERROR;
2935		break;
2936	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2937	case PQI_DATA_IN_OUT_ABORTED:
2938		host_byte = DID_ABORT;
2939		break;
2940	case PQI_DATA_IN_OUT_TIMEOUT:
2941		host_byte = DID_TIME_OUT;
2942		break;
2943 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2944 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2945 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2946 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2947 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2948 case PQI_DATA_IN_OUT_ERROR:
2949 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2950 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2951 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2952 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2953 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2954 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2955 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2956	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2957	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2958	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2959	default:
2960		host_byte = DID_ERROR;
2961		break;
2962	}
2964 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2965	if (sense_data_length == 0)
2966		sense_data_length =
2967			get_unaligned_le16(&error_info->response_data_length);
2968 if (sense_data_length) {
2969 if (sense_data_length > sizeof(error_info->data))
2970 sense_data_length = sizeof(error_info->data);
2972 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2973 scsi_normalize_sense(error_info->data,
2974 sense_data_length, &sshdr) &&
2975 sshdr.sense_key == HARDWARE_ERROR &&
2976 sshdr.asc == 0x3e) {
2977 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2978 struct pqi_scsi_dev *device = scmd->device->hostdata;
2980 switch (sshdr.ascq) {
2981 case 0x1: /* LOGICAL UNIT FAILURE */
2982 if (printk_ratelimit())
2983 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2984 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2985 pqi_take_device_offline(scmd->device, "RAID");
2986				host_byte = DID_NO_CONNECT;
2987				break;
2989			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
2990				if (printk_ratelimit())
2991					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2992						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2993				break;
2994			}
2995		}
2997		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2998			sense_data_length = SCSI_SENSE_BUFFERSIZE;
2999		memcpy(scmd->sense_buffer, error_info->data,
3000			sense_data_length);
3001	}
3003 scmd->result = scsi_status;
3004	set_host_byte(scmd, host_byte);
3005 }
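/*
 * Example of the sense handling above: a CHECK CONDITION carrying
 * HARDWARE ERROR with ASC 0x3e / ASCQ 0x1 (logical unit failure) offlines
 * the device and completes the command with DID_NO_CONNECT instead of
 * letting the midlayer retry it.
 */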
3007 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3008 {
3009	u8 scsi_status;
3010	u8 host_byte;
3011	struct scsi_cmnd *scmd;
3012	struct pqi_aio_error_info *error_info;
3013	size_t sense_data_length;
3014	int residual_count;
3015	int xfer_count;
3016	bool device_offline;
3018	scmd = io_request->scmd;
3019	error_info = io_request->error_info;
3020	host_byte = DID_OK;
3021	sense_data_length = 0;
3022	device_offline = false;
3024	switch (error_info->service_response) {
3025	case PQI_AIO_SERV_RESPONSE_COMPLETE:
3026		scsi_status = error_info->status;
3027		break;
3028	case PQI_AIO_SERV_RESPONSE_FAILURE:
3029		switch (error_info->status) {
3030		case PQI_AIO_STATUS_IO_ABORTED:
3031			scsi_status = SAM_STAT_TASK_ABORTED;
3032			break;
3033		case PQI_AIO_STATUS_UNDERRUN:
3034			scsi_status = SAM_STAT_GOOD;
3035			residual_count = get_unaligned_le32(
3036				&error_info->residual_count);
3037			scsi_set_resid(scmd, residual_count);
3038			xfer_count = scsi_bufflen(scmd) - residual_count;
3039			if (xfer_count < scmd->underflow)
3040				host_byte = DID_SOFT_ERROR;
3041			break;
3042		case PQI_AIO_STATUS_OVERRUN:
3043			scsi_status = SAM_STAT_GOOD;
3044			break;
3045		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3046			pqi_aio_path_disabled(io_request);
3047			scsi_status = SAM_STAT_GOOD;
3048			io_request->status = -EAGAIN;
3049			break;
3050		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3051		case PQI_AIO_STATUS_INVALID_DEVICE:
3052			if (!io_request->raid_bypass) {
3053				device_offline = true;
3054				pqi_take_device_offline(scmd->device, "AIO");
3055				host_byte = DID_NO_CONNECT;
3056			}
3057			scsi_status = SAM_STAT_CHECK_CONDITION;
3058			break;
3059		case PQI_AIO_STATUS_IO_ERROR:
3060		default:
3061			scsi_status = SAM_STAT_CHECK_CONDITION;
3062			break;
3063		}
3064		break;
3065	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3066	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3067		scsi_status = SAM_STAT_GOOD;
3068		break;
3069	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3070	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3071	default:
3072		scsi_status = SAM_STAT_CHECK_CONDITION;
3073		break;
3074	}
3076	if (error_info->data_present) {
3077		sense_data_length =
3078			get_unaligned_le16(&error_info->data_length);
3079		if (sense_data_length) {
3080			if (sense_data_length > sizeof(error_info->data))
3081				sense_data_length = sizeof(error_info->data);
3082			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3083				sense_data_length = SCSI_SENSE_BUFFERSIZE;
3084			memcpy(scmd->sense_buffer, error_info->data,
3085				sense_data_length);
3086		}
3087	}
3089	if (device_offline && sense_data_length == 0)
3090		scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3092	scmd->result = scsi_status;
3093	set_host_byte(scmd, host_byte);
3094 }
3096 static void pqi_process_io_error(unsigned int iu_type,
3097	struct pqi_io_request *io_request)
3098 {
3099	switch (iu_type) {
3100	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3101		pqi_process_raid_io_error(io_request);
3102		break;
3103	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3104		pqi_process_aio_io_error(io_request);
3105		break;
3106	}
3107 }
3109 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3110	struct pqi_task_management_response *response)
3111 {
3112	int rc;
3114	switch (response->response_code) {
3115	case SOP_TMF_COMPLETE:
3116	case SOP_TMF_FUNCTION_SUCCEEDED:
3117		rc = 0;
3118		break;
3119	case SOP_TMF_REJECTED:
3120		rc = -EAGAIN;
3121		break;
3122	default:
3123		rc = -EIO;
3124		break;
3125	}
3127	if (rc)
3128		dev_err(&ctrl_info->pci_dev->dev,
3129			"Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3131	return rc;
3132 }
3134 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
3135 {
3136	pqi_take_ctrl_offline(ctrl_info);
3137 }
3139 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3140 {
3141	int num_responses;
3142	pqi_index_t oq_pi;
3143	pqi_index_t oq_ci;
3144	struct pqi_io_request *io_request;
3145	struct pqi_io_response *response;
3146	u16 request_id;
3148	num_responses = 0;
3149	oq_ci = queue_group->oq_ci_copy;
3151	while (1) {
3152		oq_pi = readl(queue_group->oq_pi);
3153		if (oq_pi >= ctrl_info->num_elements_per_oq) {
3154			pqi_invalid_response(ctrl_info);
3155			dev_err(&ctrl_info->pci_dev->dev,
3156				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3157				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3158			return -1;
3159		}
3160		if (oq_pi == oq_ci)
3161			break;
3163		num_responses++;
3164 response = queue_group->oq_element_array +
3165 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3167 request_id = get_unaligned_le16(&response->request_id);
3168 if (request_id >= ctrl_info->max_io_slots) {
3169 pqi_invalid_response(ctrl_info);
3170 dev_err(&ctrl_info->pci_dev->dev,
3171 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3172 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3176 io_request = &ctrl_info->io_request_pool[request_id];
3177 if (atomic_read(&io_request->refcount) == 0) {
3178 pqi_invalid_response(ctrl_info);
3179 dev_err(&ctrl_info->pci_dev->dev,
3180 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3181 request_id, oq_pi, oq_ci);
3185 switch (response->header.iu_type) {
3186		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3187		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3188			if (io_request->scmd)
3189				io_request->scmd->result = 0;
3190			fallthrough;
3191		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3192			break;
3193		case PQI_RESPONSE_IU_VENDOR_GENERAL:
3194			io_request->status =
3195				get_unaligned_le16(
3196					&((struct pqi_vendor_general_response *)response)->status);
3197			break;
3198		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3199			io_request->status = pqi_interpret_task_management_response(ctrl_info,
3200				(void *)response);
3201			break;
3202		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3203			pqi_aio_path_disabled(io_request);
3204			io_request->status = -EAGAIN;
3205			break;
3206		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3207		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3208			io_request->error_info = ctrl_info->error_buffer +
3209				(get_unaligned_le16(&response->error_index) *
3210				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3211			pqi_process_io_error(response->header.iu_type, io_request);
3212			break;
3213		default:
3214			pqi_invalid_response(ctrl_info);
3215			dev_err(&ctrl_info->pci_dev->dev,
3216				"unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3217				response->header.iu_type, oq_pi, oq_ci);
3218			return -1;
3219		}
3221 io_request->io_complete_callback(io_request, io_request->context);
3223		/*
3224		 * Note that the I/O request structure CANNOT BE TOUCHED after
3225		 * returning from the I/O completion callback!
3226		 */
3227		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3228	}
3230 if (num_responses) {
3231 queue_group->oq_ci_copy = oq_ci;
3232 writel(oq_ci, queue_group->oq_ci);
3233	}
3235	return num_responses;
3236 }
3238 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3239	unsigned int ci, unsigned int elements_in_queue)
3240 {
3241	unsigned int num_elements_used;
3243	if (pi >= ci)
3244		num_elements_used = pi - ci;
3245	else
3246		num_elements_used = elements_in_queue - ci + pi;
3248	return elements_in_queue - num_elements_used - 1;
3249 }
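/*
 * Worked example: with elements_in_queue = 8, pi = 2 and ci = 6, the queue
 * holds 8 - 6 + 2 = 4 used elements, so 8 - 4 - 1 = 3 are free; one slot is
 * always sacrificed so that a full queue is distinguishable from an empty
 * one.
 */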
3251 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3252	struct pqi_event_acknowledge_request *iu, size_t iu_length)
3253 {
3254	pqi_index_t iq_pi;
3255	pqi_index_t iq_ci;
3256	unsigned long flags;
3257	void *next_element;
3258	struct pqi_queue_group *queue_group;
3260	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3261	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3263	while (1) {
3264		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3266		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3267		iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3269		if (pqi_num_elements_free(iq_pi, iq_ci,
3270			ctrl_info->num_elements_per_iq))
3271			break;
3273		spin_unlock_irqrestore(
3274			&queue_group->submit_lock[RAID_PATH], flags);
3276		if (pqi_ctrl_offline(ctrl_info))
3277			return;
3278	}
3280 next_element = queue_group->iq_element_array[RAID_PATH] +
3281 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3283 memcpy(next_element, iu, iu_length);
3285 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3286 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3288	/*
3289	 * This write notifies the controller that an IU is available to be
3290	 * processed.
3291	 */
3292	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3294	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3295 }
3297 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3298	struct pqi_event *event)
3299 {
3300	struct pqi_event_acknowledge_request request;
3302 memset(&request, 0, sizeof(request));
3304 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3305 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3306 &request.header.iu_length);
3307 request.event_type = event->event_type;
3308 put_unaligned_le16(event->event_id, &request.event_id);
3309 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3311	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3312 }
3314 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3315 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3317 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3318	struct pqi_ctrl_info *ctrl_info)
3319 {
3320	u8 status;
3321	unsigned long timeout;
3323	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3325	while (1) {
3326		status = pqi_read_soft_reset_status(ctrl_info);
3327		if (status & PQI_SOFT_RESET_INITIATE)
3328			return RESET_INITIATE_DRIVER;
3330		if (status & PQI_SOFT_RESET_ABORT)
3331			return RESET_ABORT;
3333		if (!sis_is_firmware_running(ctrl_info))
3334			return RESET_NORESPONSE;
3336		if (time_after(jiffies, timeout)) {
3337			dev_warn(&ctrl_info->pci_dev->dev,
3338				"timed out waiting for soft reset status\n");
3339			return RESET_TIMEDOUT;
3340		}
3342		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3343	}
3344 }
3346 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3347 {
3348	int rc;
3349	unsigned int delay_secs;
3350	enum pqi_soft_reset_status reset_status;
3352	if (ctrl_info->soft_reset_handshake_supported)
3353		reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3354	else
3355		reset_status = RESET_INITIATE_FIRMWARE;
3357	delay_secs = PQI_POST_RESET_DELAY_SECS;
3359	switch (reset_status) {
3360	case RESET_TIMEDOUT:
3361		delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3362		fallthrough;
3363	case RESET_INITIATE_DRIVER:
3364		dev_info(&ctrl_info->pci_dev->dev,
3365			"Online Firmware Activation: resetting controller\n");
3366		sis_soft_reset(ctrl_info);
3367		fallthrough;
3368	case RESET_INITIATE_FIRMWARE:
3369		ctrl_info->pqi_mode_enabled = false;
3370		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3371		rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3372		pqi_ofa_free_host_buffer(ctrl_info);
3373		pqi_ctrl_ofa_done(ctrl_info);
3374		dev_info(&ctrl_info->pci_dev->dev,
3375			"Online Firmware Activation: %s\n",
3376			rc == 0 ? "SUCCESS" : "FAILED");
3377		break;
3378	case RESET_ABORT:
3379		dev_info(&ctrl_info->pci_dev->dev,
3380			"Online Firmware Activation ABORTED\n");
3381		if (ctrl_info->soft_reset_handshake_supported)
3382			pqi_clear_soft_reset_status(ctrl_info);
3383		pqi_ofa_free_host_buffer(ctrl_info);
3384		pqi_ctrl_ofa_done(ctrl_info);
3385		pqi_ofa_ctrl_unquiesce(ctrl_info);
3386		break;
3387	case RESET_NORESPONSE:
3388		fallthrough;
3389	default:
3390		dev_err(&ctrl_info->pci_dev->dev,
3391			"unexpected Online Firmware Activation reset status: 0x%x\n",
3392			reset_status);
3393		pqi_ofa_free_host_buffer(ctrl_info);
3394		pqi_ctrl_ofa_done(ctrl_info);
3395		pqi_ofa_ctrl_unquiesce(ctrl_info);
3396		pqi_take_ctrl_offline(ctrl_info);
3397		break;
3398	}
3399 }
3401 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3402 {
3403	struct pqi_ctrl_info *ctrl_info;
3405	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3407	pqi_ctrl_ofa_start(ctrl_info);
3408	pqi_ofa_setup_host_buffer(ctrl_info);
3409	pqi_ofa_host_memory_update(ctrl_info);
3410 }
3412 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3413 {
3414	struct pqi_ctrl_info *ctrl_info;
3415	struct pqi_event *event;
3417	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3419	event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3421	pqi_ofa_ctrl_quiesce(ctrl_info);
3422	pqi_acknowledge_event(ctrl_info, event);
3423	pqi_process_soft_reset(ctrl_info);
3424 }
3426 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3427	struct pqi_event *event)
3428 {
3429	bool ack_event;
3431	ack_event = true;
3433	switch (event->event_id) {
3434 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3435 dev_info(&ctrl_info->pci_dev->dev,
3436 "received Online Firmware Activation memory allocation request\n");
3437		schedule_work(&ctrl_info->ofa_memory_alloc_work);
3438		break;
3439 case PQI_EVENT_OFA_QUIESCE:
3440		dev_info(&ctrl_info->pci_dev->dev,
3441			"received Online Firmware Activation quiesce request\n");
3442		schedule_work(&ctrl_info->ofa_quiesce_work);
3443		ack_event = false;
3444		break;
3445 case PQI_EVENT_OFA_CANCELED:
3446 dev_info(&ctrl_info->pci_dev->dev,
3447 "received Online Firmware Activation cancel request: reason: %u\n",
3448 ctrl_info->ofa_cancel_reason);
3449 pqi_ofa_free_host_buffer(ctrl_info);
3450		pqi_ctrl_ofa_done(ctrl_info);
3451		break;
3452	default:
3453		dev_err(&ctrl_info->pci_dev->dev,
3454			"received unknown Online Firmware Activation request: event ID: %u\n",
3455			event->event_id);
3456		break;
3457	}
3459	return ack_event;
3460 }
3462 static void pqi_event_worker(struct work_struct *work)
3463 {
3464	unsigned int i;
3465	bool rescan_needed;
3466	struct pqi_ctrl_info *ctrl_info;
3467	struct pqi_event *event;
3468	bool ack_event;
3470	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3472	pqi_ctrl_busy(ctrl_info);
3473	pqi_wait_if_ctrl_blocked(ctrl_info);
3474	if (pqi_ctrl_offline(ctrl_info))
3475		goto out;
3477 rescan_needed = false;
3478 event = ctrl_info->events;
3479 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3480		if (event->pending) {
3481			event->pending = false;
3482			if (event->event_type == PQI_EVENT_TYPE_OFA) {
3483				ack_event = pqi_ofa_process_event(ctrl_info, event);
3484			} else {
3485				ack_event = true;
3486				rescan_needed = true;
3487			}
3488			if (ack_event)
3489				pqi_acknowledge_event(ctrl_info, event);
3490		}
3491		event++;
3492	}
3494	if (rescan_needed)
3495		pqi_schedule_rescan_worker_delayed(ctrl_info);
3497 out:
3498	pqi_ctrl_unbusy(ctrl_info);
3499 }
3501 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
3503 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3504 {
3505	int num_interrupts;
3506	u32 heartbeat_count;
3507	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3509	pqi_check_ctrl_health(ctrl_info);
3510	if (pqi_ctrl_offline(ctrl_info))
3511		return;
3513	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3514	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3516	if (num_interrupts == ctrl_info->previous_num_interrupts) {
3517		if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3518			dev_err(&ctrl_info->pci_dev->dev,
3519				"no heartbeat detected - last heartbeat count: %u\n",
3520				heartbeat_count);
3521			pqi_take_ctrl_offline(ctrl_info);
3522			return;
3523		}
3524	} else {
3525		ctrl_info->previous_num_interrupts = num_interrupts;
3526	}
3528	ctrl_info->previous_heartbeat_count = heartbeat_count;
3529	mod_timer(&ctrl_info->heartbeat_timer,
3530		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3531 }
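/*
 * Example timeline: with a 10-second timer interval, the controller is
 * taken offline only when neither the interrupt count nor the firmware
 * heartbeat counter has advanced across one complete interval.
 */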
3533 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3534 {
3535	if (!ctrl_info->heartbeat_counter)
3536		return;
3538 ctrl_info->previous_num_interrupts =
3539 atomic_read(&ctrl_info->num_interrupts);
3540 ctrl_info->previous_heartbeat_count =
3541 pqi_read_heartbeat_counter(ctrl_info);
3543 ctrl_info->heartbeat_timer.expires =
3544 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3545	add_timer(&ctrl_info->heartbeat_timer);
3546 }
3548 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3549 {
3550	del_timer_sync(&ctrl_info->heartbeat_timer);
3551 }
3553 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3554	struct pqi_event *event, struct pqi_event_response *response)
3555 {
3556	switch (event->event_id) {
3557	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3558		ctrl_info->ofa_bytes_requested =
3559			get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3560		break;
3561	case PQI_EVENT_OFA_CANCELED:
3562		ctrl_info->ofa_cancel_reason =
3563			get_unaligned_le16(&response->data.ofa_cancelled.reason);
3564		break;
3565	}
3566 }
3568 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3569 {
3570	int num_events;
3571	pqi_index_t oq_pi;
3572	pqi_index_t oq_ci;
3573	struct pqi_event_queue *event_queue;
3574	struct pqi_event_response *response;
3575	struct pqi_event *event;
3576	int event_index;
3578	event_queue = &ctrl_info->event_queue;
3579	num_events = 0;
3580	oq_ci = event_queue->oq_ci_copy;
3582	while (1) {
3583		oq_pi = readl(event_queue->oq_pi);
3584		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3585			pqi_invalid_response(ctrl_info);
3586			dev_err(&ctrl_info->pci_dev->dev,
3587				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3588				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3589			return -1;
3590		}
3592		if (oq_pi == oq_ci)
3593			break;
3595		num_events++;
3596		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3598 event_index = pqi_event_type_to_event_index(response->event_type);
3600 if (event_index >= 0 && response->request_acknowledge) {
3601 event = &ctrl_info->events[event_index];
3602 event->pending = true;
3603 event->event_type = response->event_type;
3604 event->event_id = get_unaligned_le16(&response->event_id);
3605 event->additional_event_id =
3606 get_unaligned_le32(&response->additional_event_id);
3607 if (event->event_type == PQI_EVENT_TYPE_OFA)
3608				pqi_ofa_capture_event_payload(ctrl_info, event, response);
3609		}
3611		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3612	}
3614	if (num_events) {
3615		event_queue->oq_ci_copy = oq_ci;
3616		writel(oq_ci, event_queue->oq_ci);
3617		schedule_work(&ctrl_info->event_work);
3618	}
3620	return num_events;
3621 }
3623 #define PQI_LEGACY_INTX_MASK 0x1
3625 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3626 {
3627	u32 intx_mask;
3628	struct pqi_device_registers __iomem *pqi_registers;
3629	volatile void __iomem *register_addr;
3631	pqi_registers = ctrl_info->pqi_registers;
3633	if (enable_intx)
3634		register_addr = &pqi_registers->legacy_intx_mask_clear;
3635	else
3636		register_addr = &pqi_registers->legacy_intx_mask_set;
3638	intx_mask = readl(register_addr);
3639	intx_mask |= PQI_LEGACY_INTX_MASK;
3640	writel(intx_mask, register_addr);
3641 }
3643 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3644	enum pqi_irq_mode new_mode)
3645 {
3646	switch (ctrl_info->irq_mode) {
3647	case IRQ_MODE_MSIX:
3648		switch (new_mode) {
3649		case IRQ_MODE_MSIX:
3650			break;
3651		case IRQ_MODE_INTX:
3652			pqi_configure_legacy_intx(ctrl_info, true);
3653			sis_enable_intx(ctrl_info);
3654			break;
3655		case IRQ_MODE_NONE:
3656			break;
3657		}
3658		break;
3659	case IRQ_MODE_INTX:
3660		switch (new_mode) {
3661		case IRQ_MODE_MSIX:
3662			pqi_configure_legacy_intx(ctrl_info, false);
3663			sis_enable_msix(ctrl_info);
3664			break;
3665		case IRQ_MODE_INTX:
3666			break;
3667		case IRQ_MODE_NONE:
3668			pqi_configure_legacy_intx(ctrl_info, false);
3669			break;
3670		}
3671		break;
3672	case IRQ_MODE_NONE:
3673		switch (new_mode) {
3674		case IRQ_MODE_MSIX:
3675			sis_enable_msix(ctrl_info);
3676			break;
3677		case IRQ_MODE_INTX:
3678			pqi_configure_legacy_intx(ctrl_info, true);
3679			sis_enable_intx(ctrl_info);
3680			break;
3681		case IRQ_MODE_NONE:
3682			break;
3683		}
3684		break;
3685	}
3687	ctrl_info->irq_mode = new_mode;
3688 }
3690 #define PQI_LEGACY_INTX_PENDING 0x1
3692 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3693 {
3694	bool valid_irq;
3695	u32 intx_status;
3697	switch (ctrl_info->irq_mode) {
3698	case IRQ_MODE_MSIX:
3699		valid_irq = true;
3700		break;
3701	case IRQ_MODE_INTX:
3702		intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3703		if (intx_status & PQI_LEGACY_INTX_PENDING)
3704			valid_irq = true;
3705		else
3706			valid_irq = false;
3707		break;
3708	case IRQ_MODE_NONE:
3709	default:
3710		valid_irq = false;
3711		break;
3712	}
3714	return valid_irq;
3715 }
3717 static irqreturn_t pqi_irq_handler(int irq, void *data)
3718 {
3719	struct pqi_ctrl_info *ctrl_info;
3720	struct pqi_queue_group *queue_group;
3721	int num_io_responses_handled;
3722	int num_events_handled;
3724	queue_group = data;
3725	ctrl_info = queue_group->ctrl_info;
3727	if (!pqi_is_valid_irq(ctrl_info))
3728		return IRQ_NONE;
3730	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3731	if (num_io_responses_handled < 0)
3732		goto out;
3734	if (irq == ctrl_info->event_irq) {
3735		num_events_handled = pqi_process_event_intr(ctrl_info);
3736		if (num_events_handled < 0)
3737			goto out;
3738	} else {
3739		num_events_handled = 0;
3740	}
3742	if (num_io_responses_handled + num_events_handled > 0)
3743		atomic_inc(&ctrl_info->num_interrupts);
3745	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3746	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3748 out:
3749	return IRQ_HANDLED;
3750 }
3752 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3753 {
3754	struct pci_dev *pci_dev = ctrl_info->pci_dev;
3755	int i;
3756	int rc;
3758	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3760	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3761		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3762			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3763		if (rc) {
3764			dev_err(&pci_dev->dev,
3765				"irq %u init failed with error %d\n",
3766				pci_irq_vector(pci_dev, i), rc);
3767			return rc;
3768		}
3769		ctrl_info->num_msix_vectors_initialized++;
3770	}
3772	return 0;
3773 }
3775 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3776 {
3777	int i;
3779	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3780		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3781			&ctrl_info->queue_groups[i]);
3783	ctrl_info->num_msix_vectors_initialized = 0;
3784 }
3786 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3787 {
3788	int num_vectors_enabled;
3790	num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3791		PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3792		PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3793	if (num_vectors_enabled < 0) {
3794		dev_err(&ctrl_info->pci_dev->dev,
3795			"MSI-X init failed with error %d\n",
3796			num_vectors_enabled);
3797		return num_vectors_enabled;
3798	}
3800	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3801	ctrl_info->irq_mode = IRQ_MODE_MSIX;
3803	return 0;
3804 }
3805 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3806 {
3807	if (ctrl_info->num_msix_vectors_enabled) {
3808		pci_free_irq_vectors(ctrl_info->pci_dev);
3809		ctrl_info->num_msix_vectors_enabled = 0;
3810	}
3811 }
3813 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3814 {
3815	int i;
3816	size_t alloc_length;
3817 size_t element_array_length_per_iq;
3818 size_t element_array_length_per_oq;
3819 void *element_array;
3820 void __iomem *next_queue_index;
3821 void *aligned_pointer;
3822 unsigned int num_inbound_queues;
3823 unsigned int num_outbound_queues;
3824 unsigned int num_queue_indexes;
3825 struct pqi_queue_group *queue_group;
3827 element_array_length_per_iq =
3828 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3829 ctrl_info->num_elements_per_iq;
3830 element_array_length_per_oq =
3831 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3832 ctrl_info->num_elements_per_oq;
3833 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3834 num_outbound_queues = ctrl_info->num_queue_groups;
3835 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3837 aligned_pointer = NULL;
3839 for (i = 0; i < num_inbound_queues; i++) {
3840 aligned_pointer = PTR_ALIGN(aligned_pointer,
3841 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3842		aligned_pointer += element_array_length_per_iq;
3843	}
3845 for (i = 0; i < num_outbound_queues; i++) {
3846 aligned_pointer = PTR_ALIGN(aligned_pointer,
3847 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3848		aligned_pointer += element_array_length_per_oq;
3849	}
3851 aligned_pointer = PTR_ALIGN(aligned_pointer,
3852 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3853 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3854 PQI_EVENT_OQ_ELEMENT_LENGTH;
3856 for (i = 0; i < num_queue_indexes; i++) {
3857 aligned_pointer = PTR_ALIGN(aligned_pointer,
3858 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3859		aligned_pointer += sizeof(pqi_index_t);
3860	}
3862 alloc_length = (size_t)aligned_pointer +
3863 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3865 alloc_length += PQI_EXTRA_SGL_MEMORY;
3867 ctrl_info->queue_memory_base =
3868 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3869			&ctrl_info->queue_memory_base_dma_handle,
3870			GFP_KERNEL);
3872	if (!ctrl_info->queue_memory_base)
3873		return -ENOMEM;
3875 ctrl_info->queue_memory_length = alloc_length;
3877 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3878 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3880 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3881 queue_group = &ctrl_info->queue_groups[i];
3882 queue_group->iq_element_array[RAID_PATH] = element_array;
3883 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3884 ctrl_info->queue_memory_base_dma_handle +
3885 (element_array - ctrl_info->queue_memory_base);
3886 element_array += element_array_length_per_iq;
3887 element_array = PTR_ALIGN(element_array,
3888 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3889 queue_group->iq_element_array[AIO_PATH] = element_array;
3890 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3891 ctrl_info->queue_memory_base_dma_handle +
3892 (element_array - ctrl_info->queue_memory_base);
3893 element_array += element_array_length_per_iq;
3894 element_array = PTR_ALIGN(element_array,
3895			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3896	}
3898 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3899 queue_group = &ctrl_info->queue_groups[i];
3900 queue_group->oq_element_array = element_array;
3901 queue_group->oq_element_array_bus_addr =
3902 ctrl_info->queue_memory_base_dma_handle +
3903 (element_array - ctrl_info->queue_memory_base);
3904 element_array += element_array_length_per_oq;
3905 element_array = PTR_ALIGN(element_array,
3906			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3907	}
3909 ctrl_info->event_queue.oq_element_array = element_array;
3910 ctrl_info->event_queue.oq_element_array_bus_addr =
3911 ctrl_info->queue_memory_base_dma_handle +
3912 (element_array - ctrl_info->queue_memory_base);
3913 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3914 PQI_EVENT_OQ_ELEMENT_LENGTH;
3916 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3917 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3919 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3920 queue_group = &ctrl_info->queue_groups[i];
3921 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3922 queue_group->iq_ci_bus_addr[RAID_PATH] =
3923 ctrl_info->queue_memory_base_dma_handle +
3924			((void __iomem *)queue_group->iq_ci[RAID_PATH] -
3925			(void __iomem *)ctrl_info->queue_memory_base);
3926 next_queue_index += sizeof(pqi_index_t);
3927 next_queue_index = PTR_ALIGN(next_queue_index,
3928 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3929 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3930 queue_group->iq_ci_bus_addr[AIO_PATH] =
3931 ctrl_info->queue_memory_base_dma_handle +
3932			((void __iomem *)queue_group->iq_ci[AIO_PATH] -
3933			(void __iomem *)ctrl_info->queue_memory_base);
3934 next_queue_index += sizeof(pqi_index_t);
3935 next_queue_index = PTR_ALIGN(next_queue_index,
3936 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3937 queue_group->oq_pi = next_queue_index;
3938 queue_group->oq_pi_bus_addr =
3939 ctrl_info->queue_memory_base_dma_handle +
3940			((void __iomem *)queue_group->oq_pi -
3941			(void __iomem *)ctrl_info->queue_memory_base);
3942 next_queue_index += sizeof(pqi_index_t);
3943 next_queue_index = PTR_ALIGN(next_queue_index,
3944			PQI_OPERATIONAL_INDEX_ALIGNMENT);
3945	}
3947	ctrl_info->event_queue.oq_pi = next_queue_index;
3948	ctrl_info->event_queue.oq_pi_bus_addr =
3949		ctrl_info->queue_memory_base_dma_handle +
3950		((void __iomem *)ctrl_info->event_queue.oq_pi -
3951		(void __iomem *)ctrl_info->queue_memory_base);
3953	return 0;
3954 }
3956 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3957 {
3958	unsigned int i;
3959	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3960	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3962	/*
3963	 * Initialize the backpointers to the controller structure in
3964	 * each operational queue group structure.
3965	 */
3966	for (i = 0; i < ctrl_info->num_queue_groups; i++)
3967 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3969	/*
3970	 * Assign IDs to all operational queues.  Note that the IDs
3971	 * assigned to operational IQs are independent of the IDs
3972	 * assigned to operational OQs.
3973	 */
3974	ctrl_info->event_queue.oq_id = next_oq_id++;
3975 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3976 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3977 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3978		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3979	}
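/*
 * Example (assuming PQI_MIN_OPERATIONAL_QUEUE_ID == 1): with three queue
 * groups, the event queue takes OQ ID 1 and the groups take OQ IDs 2-4,
 * while their RAID/AIO IQs take IDs 1-6; the IQ and OQ ID spaces are
 * independent, as the comment above notes.
 */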
3981	/*
3982	 * Assign MSI-X table entry indexes to all queues.  Note that the
3983	 * interrupt for the event queue is shared with the first queue group.
3984	 */
3985	ctrl_info->event_queue.int_msg_num = 0;
3986 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3987 ctrl_info->queue_groups[i].int_msg_num = i;
3989 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3990 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3991 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3992 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3993		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3994	}
3996 }
3997 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3998 {
3999	size_t alloc_length;
4000 struct pqi_admin_queues_aligned *admin_queues_aligned;
4001 struct pqi_admin_queues *admin_queues;
4003 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4004 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4006 ctrl_info->admin_queue_memory_base =
4007 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4008 &ctrl_info->admin_queue_memory_base_dma_handle,
4011 if (!ctrl_info->admin_queue_memory_base)
4014 ctrl_info->admin_queue_memory_length = alloc_length;
4016 admin_queues = &ctrl_info->admin_queues;
4017 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4018 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4019 admin_queues->iq_element_array =
4020 &admin_queues_aligned->iq_element_array;
4021 admin_queues->oq_element_array =
4022 &admin_queues_aligned->oq_element_array;
4023 admin_queues->iq_ci =
4024 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4025 admin_queues->oq_pi =
4026 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4028 admin_queues->iq_element_array_bus_addr =
4029 ctrl_info->admin_queue_memory_base_dma_handle +
4030 (admin_queues->iq_element_array -
4031 ctrl_info->admin_queue_memory_base);
4032 admin_queues->oq_element_array_bus_addr =
4033 ctrl_info->admin_queue_memory_base_dma_handle +
4034 (admin_queues->oq_element_array -
4035 ctrl_info->admin_queue_memory_base);
4036 admin_queues->iq_ci_bus_addr =
4037 ctrl_info->admin_queue_memory_base_dma_handle +
4038 ((void __iomem *)admin_queues->iq_ci -
4039 (void __iomem *)ctrl_info->admin_queue_memory_base);
4040 admin_queues->oq_pi_bus_addr =
4041 ctrl_info->admin_queue_memory_base_dma_handle +
4042 ((void __iomem *)admin_queues->oq_pi -
4043		(void __iomem *)ctrl_info->admin_queue_memory_base);
4045	return 0;
4046 }
4048 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
4049 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4051 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4052 {
4053	struct pqi_device_registers __iomem *pqi_registers;
4054	struct pqi_admin_queues *admin_queues;
4055	unsigned long timeout;
4056	u8 status;
4057	u32 reg;
4059	pqi_registers = ctrl_info->pqi_registers;
4060 admin_queues = &ctrl_info->admin_queues;
4062 writeq((u64)admin_queues->iq_element_array_bus_addr,
4063 &pqi_registers->admin_iq_element_array_addr);
4064 writeq((u64)admin_queues->oq_element_array_bus_addr,
4065 &pqi_registers->admin_oq_element_array_addr);
4066 writeq((u64)admin_queues->iq_ci_bus_addr,
4067 &pqi_registers->admin_iq_ci_addr);
4068 writeq((u64)admin_queues->oq_pi_bus_addr,
4069 &pqi_registers->admin_oq_pi_addr);
4071 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4072 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4073 (admin_queues->int_msg_num << 16);
4074 writel(reg, &pqi_registers->admin_iq_num_elements);
4076 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4077 &pqi_registers->function_and_status_code);
4079	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4080	while (1) {
4081		status = readb(&pqi_registers->function_and_status_code);
4082		if (status == PQI_STATUS_IDLE)
4083			break;
4084		if (time_after(jiffies, timeout))
4085			return -ETIMEDOUT;
4086		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4087	}
4089	/*
4090	 * The offset registers are not initialized to the correct
4091	 * offsets until *after* the create admin queue pair command
4092	 * completes successfully.
4093	 */
4094 admin_queues->iq_pi = ctrl_info->iomem_base +
4095 PQI_DEVICE_REGISTERS_OFFSET +
4096 readq(&pqi_registers->admin_iq_pi_offset);
4097 admin_queues->oq_ci = ctrl_info->iomem_base +
4098 PQI_DEVICE_REGISTERS_OFFSET +
4099		readq(&pqi_registers->admin_oq_ci_offset);
4101	return 0;
4102 }
4104 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4105	struct pqi_general_admin_request *request)
4106 {
4107	struct pqi_admin_queues *admin_queues;
4108	void *next_element;
4109	pqi_index_t iq_pi;
4111	admin_queues = &ctrl_info->admin_queues;
4112	iq_pi = admin_queues->iq_pi_copy;
4114 next_element = admin_queues->iq_element_array +
4115 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4117 memcpy(next_element, request, sizeof(*request));
4119 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4120 admin_queues->iq_pi_copy = iq_pi;
4122	/*
4123	 * This write notifies the controller that an IU is available to be
4124	 * processed.
4125	 */
4126	writel(iq_pi, admin_queues->iq_pi);
4127 }
4129 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4131 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4132	struct pqi_general_admin_response *response)
4133 {
4134	struct pqi_admin_queues *admin_queues;
4135	pqi_index_t oq_pi;
4136	pqi_index_t oq_ci;
4137	unsigned long timeout;
4139	admin_queues = &ctrl_info->admin_queues;
4140	oq_ci = admin_queues->oq_ci_copy;
4142	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
4144	while (1) {
4145		oq_pi = readl(admin_queues->oq_pi);
4146		if (oq_pi != oq_ci)
4147			break;
4148		if (time_after(jiffies, timeout)) {
4149			dev_err(&ctrl_info->pci_dev->dev,
4150				"timed out waiting for admin response\n");
4151			return -ETIMEDOUT;
4152		}
4153		if (!sis_is_firmware_running(ctrl_info))
4154			return -ENXIO;
4155		usleep_range(1000, 2000);
4156	}
4158 memcpy(response, admin_queues->oq_element_array +
4159 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4161 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4162 admin_queues->oq_ci_copy = oq_ci;
4163	writel(oq_ci, admin_queues->oq_ci);
4165	return 0;
4166 }
4168 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4169	struct pqi_queue_group *queue_group, enum pqi_io_path path,
4170	struct pqi_io_request *io_request)
4171 {
4172	struct pqi_io_request *next;
4173	void *next_element;
4174	pqi_index_t iq_pi;
4175	pqi_index_t iq_ci;
4176	size_t iu_length;
4177	unsigned long flags;
4178	unsigned int num_elements_needed;
4179	unsigned int num_elements_to_end_of_queue;
4180	size_t copy_count;
4181	struct pqi_iu_header *request;
4183	spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4185	if (io_request) {
4186		io_request->queue_group = queue_group;
4187		list_add_tail(&io_request->request_list_entry,
4188			&queue_group->request_list[path]);
4189	}
4191	iq_pi = queue_group->iq_pi_copy[path];
4193 list_for_each_entry_safe(io_request, next,
4194 &queue_group->request_list[path], request_list_entry) {
4196 request = io_request->iu;
4198 iu_length = get_unaligned_le16(&request->iu_length) +
4199 PQI_REQUEST_HEADER_LENGTH;
4200 num_elements_needed =
4201 DIV_ROUND_UP(iu_length,
4202 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4204 iq_ci = readl(queue_group->iq_ci[path]);
4206 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4207			ctrl_info->num_elements_per_iq))
4208			break;
4210 put_unaligned_le16(queue_group->oq_id,
4211 &request->response_queue_id);
4213 next_element = queue_group->iq_element_array[path] +
4214 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4216 num_elements_to_end_of_queue =
4217 ctrl_info->num_elements_per_iq - iq_pi;
4219		if (num_elements_needed <= num_elements_to_end_of_queue) {
4220			memcpy(next_element, request, iu_length);
4221		} else {
4222			copy_count = num_elements_to_end_of_queue *
4223				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4224			memcpy(next_element, request, copy_count);
4225			memcpy(queue_group->iq_element_array[path],
4226				(u8 *)request + copy_count,
4227				iu_length - copy_count);
4228		}
4230 iq_pi = (iq_pi + num_elements_needed) %
4231 ctrl_info->num_elements_per_iq;
4233		list_del(&io_request->request_list_entry);
4234	}
4236 if (iq_pi != queue_group->iq_pi_copy[path]) {
4237 queue_group->iq_pi_copy[path] = iq_pi;
4239 * This write notifies the controller that one or more IUs are
4240 * available to be processed.
4242 writel(iq_pi, queue_group->iq_pi[path]);
4245 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
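/*
 * Worked example of the wrap-around copy above (illustrative): with 32
 * elements per IQ, iq_pi = 30 and a 3-element IU, two elements are copied
 * into slots 30-31, the third into slot 0, and iq_pi becomes
 * (30 + 3) % 32 = 1.
 */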
4248 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4250 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4251	struct completion *wait)
4252 {
4253	int rc;
4255	while (1) {
4256		if (wait_for_completion_io_timeout(wait,
4257			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
4258			rc = 0;
4259			break;
4260		}
4262		pqi_check_ctrl_health(ctrl_info);
4263		if (pqi_ctrl_offline(ctrl_info)) {
4264			rc = -ENXIO;
4265			break;
4266		}
4267	}
4269	return rc;
4270 }
4272 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4275 struct completion *waiting = context;
4280 static int pqi_process_raid_io_error_synchronous(
4281 struct pqi_raid_error_info *error_info)
4285 switch (error_info->data_out_result) {
4286 case PQI_DATA_IN_OUT_GOOD:
4287 if (error_info->status == SAM_STAT_GOOD)
4290 case PQI_DATA_IN_OUT_UNDERFLOW:
4291 if (error_info->status == SAM_STAT_GOOD ||
4292 error_info->status == SAM_STAT_CHECK_CONDITION)
4295 case PQI_DATA_IN_OUT_ABORTED:
4296 rc = PQI_CMD_STATUS_ABORTED;
4303 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4305 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info)
{
	int rc = 0;
	struct pqi_io_request *io_request;
	size_t iu_length;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
		if (down_interruptible(&ctrl_info->sync_request_sem))
			return -ERESTARTSYS;
	} else {
		down(&ctrl_info->sync_request_sem);
	}

	pqi_ctrl_busy(ctrl_info);
	/*
	 * Wait for other admin queue updates such as:
	 * config table changes, OFA memory updates, ...
	 */
	if (pqi_is_blockable_request(request))
		pqi_wait_if_ctrl_blocked(ctrl_info);

	if (pqi_ctrl_offline(ctrl_info)) {
		rc = -ENXIO;
		goto out;
	}

	io_request = pqi_alloc_io_request(ctrl_info);

	put_unaligned_le16(io_request->index,
		&(((struct pqi_raid_path_request *)request)->request_id));

	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
		((struct pqi_raid_path_request *)request)->error_index =
			((struct pqi_raid_path_request *)request)->request_id;

	iu_length = get_unaligned_le16(&request->iu_length) +
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(io_request->iu, request, iu_length);

	io_request->io_complete_callback = pqi_raid_synchronous_complete;
	io_request->context = &wait;

	pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	pqi_wait_for_completion_io(ctrl_info, &wait);

	if (error_info) {
		if (io_request->error_info)
			memcpy(error_info, io_request->error_info, sizeof(*error_info));
		else
			memset(error_info, 0, sizeof(*error_info));
	} else if (rc == 0 && io_request->error_info) {
		rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
	}

	pqi_free_io_request(io_request);

out:
	pqi_ctrl_unbusy(ctrl_info);
	up(&ctrl_info->sync_request_sem);

	return rc;
}

static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{
	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;

	if (get_unaligned_le16(&response->header.iu_length) !=
		PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;

	if (response->function_code != expected_function_code)
		return -EINVAL;

	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{
	int rc;

	pqi_submit_admin_request(ctrl_info, request);

	rc = pqi_poll_for_admin_response(ctrl_info, response);

	if (rc == 0)
		rc = pqi_validate_admin_response(response, request->function_code);

	return rc;
}

static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);

	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
			&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}

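/*
 * The queue element lengths above are reported by the firmware in units of
 * 16 bytes, hence the "* 16" conversions. As a purely illustrative example,
 * a reported max_iq_element_length of 8 would correspond to 128-byte
 * inbound queue elements.
 */
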
static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->max_iq_element_length <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_iq_element_length,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_oq_element_length <
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. outbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_oq_element_length,
			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_inbound_iu_length_per_firmware <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound IU length of %u is less than the min. required length of %d\n",
			ctrl_info->max_inbound_iu_length_per_firmware,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (!ctrl_info->inbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller does not support inbound spanning\n");
		return -EINVAL;
	}

	if (ctrl_info->outbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller supports outbound spanning but this driver does not\n");
		return -EINVAL;
	}

	return 0;
}

static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}

static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
	unsigned int group_number)
{
	int rc;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	queue_group = &ctrl_info->queue_groups[group_number];

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		return rc;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path. By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		return rc;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		return rc;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}

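/*
 * Each queue group therefore ends up with three operational queues: an
 * inbound queue for the RAID path, a second inbound queue re-flagged as the
 * AIO path via the CHANGE_IQ_PROPERTY request, and one outbound queue
 * shared by both paths for completions (its oq_id is stamped into every
 * request's response_queue_id by pqi_start_io()). This summary is an
 * interpretation of the sequence above, not additional protocol behavior.
 */
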
static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info, i);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}

#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
	(offsetof(struct pqi_event_config, descriptors) + \
	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))

static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
	bool enable_events)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_event_descriptor *event_descriptor;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;

	for (i = 0; i < event_config->num_event_descriptors; i++) {
		event_descriptor = &event_config->descriptors[i];
		if (enable_events &&
			pqi_is_supported_event(event_descriptor->event_type))
			put_unaligned_le16(ctrl_info->event_queue.oq_id,
				&event_descriptor->oq_id);
		else
			put_unaligned_le16(0, &event_descriptor->oq_id);
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_TO_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_TO_DEVICE);

out:
	kfree(event_config);

	return rc;
}

static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, true);
}

static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}

static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
		ctrl_info->error_buffer_length,
		&ctrl_info->error_buffer_dma_handle,
		GFP_KERNEL);
	if (!ctrl_info->error_buffer)
		return -ENOMEM;

	return 0;
}

static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);

		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);

		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}

/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */
static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	if (reset_devices)
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE_KDUMP);
	else
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE);

	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;

	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);

	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;

	ctrl_info->sg_chain_buffer_length =
		(max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
		PQI_EXTRA_SGL_MEMORY;
	ctrl_info->sg_tablesize = max_sg_entries;
	ctrl_info->max_sectors = max_transfer_size / 512;
}

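/*
 * Worked example of the sizing above, with assumed values (4 KiB pages,
 * 1 MiB max transfer): max_sg_entries = 1048576 / 4096 + 1 = 257; after
 * clamping to the controller limit, max_transfer_size is recomputed as
 * (max_sg_entries - 1) * PAGE_SIZE so that a worst-case unaligned buffer
 * still fits, and max_sectors = max_transfer_size / 512.
 */
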
static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	if (reset_devices) {
		num_queue_groups = 1;
	} else {
		int num_cpus;
		int max_queue_groups;

		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
			ctrl_info->max_outbound_queues - 1);
		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

		num_cpus = num_online_cpus();
		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
		num_queue_groups = min(num_queue_groups, max_queue_groups);
	}

	ctrl_info->num_queue_groups = num_queue_groups;
	ctrl_info->max_hw_queue_index = num_queue_groups - 1;

	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
	ctrl_info->max_inbound_iu_length =
		(ctrl_info->max_inbound_iu_length_per_firmware /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

	num_elements_per_iq =
		(ctrl_info->max_inbound_iu_length /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* Add one because one element in each queue is unusable. */
	num_elements_per_iq++;

	num_elements_per_iq = min(num_elements_per_iq,
		ctrl_info->max_elements_per_iq);

	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
	num_elements_per_oq = min(num_elements_per_oq,
		ctrl_info->max_elements_per_oq);

	ctrl_info->num_elements_per_iq = num_elements_per_iq;
	ctrl_info->num_elements_per_oq = num_elements_per_oq;

	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;

	ctrl_info->max_sg_per_r56_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
}

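/*
 * Example of the arithmetic above (illustrative numbers only): if the
 * firmware reports a 1040-byte max inbound IU and
 * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH is 128, max_inbound_iu_length is
 * rounded down to 1024, num_elements_per_iq becomes 1024 / 128 + 1 = 9,
 * and num_elements_per_oq becomes ((9 - 1) * 2) + 1 = 17, before both are
 * clamped to the firmware-reported limits.
 */
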
static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
	struct scatterlist *sg)
{
	u64 address = (u64)sg_dma_address(sg);
	unsigned int length = sg_dma_len(sg);

	put_unaligned_le64(address, &sg_descriptor->address);
	put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}

static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
	struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
	int max_sg_per_iu, bool *chained)
{
	int i;
	unsigned int num_sg_in_iu;

	*chained = false;
	i = 0;
	num_sg_in_iu = 0;
	max_sg_per_iu--;	/* Subtract 1 to leave room for chain marker. */

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!*chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
			*chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return num_sg_in_iu;
}

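/*
 * Chaining illustration (assumed limits, not real controller values): with
 * max_sg_per_iu == 4 the builder can embed 3 data descriptors, because one
 * slot is reserved for the chain marker. For a 10-entry scatterlist it
 * writes entries 0-2 inline, turns slot 3 into a CISS_SG_CHAIN descriptor
 * pointing at io_request->sg_chain_buffer, continues with the remaining 7
 * entries in the chain buffer, and flags the final one CISS_SG_LAST.
 */
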
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;

	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
		ctrl_info->max_sg_per_iu, &chained);

	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}

static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;

	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
		ctrl_info->max_sg_per_iu, &chained);

	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}

static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count != 0) {
		sg = scsi_sglist(scmd);
		sg_descriptor = request->sg_descriptors;

		num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
			ctrl_info->max_sg_per_r56_iu, &chained);

		request->partial = chained;
		iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
	}

	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}

static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;

	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
		ctrl_info->max_sg_per_iu, &chained);

	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}

static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}

static int pqi_raid_submit_scsi_cmd_with_io_request(
	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_raid_path_request *request;

	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	request = io_request->iu;
	memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));

	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
	memcpy(request->cdb, scmd->cmnd, cdb_length);

	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}

static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct pqi_io_request *io_request;

	io_request = pqi_alloc_io_request(ctrl_info);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}

static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	if (!io_request->raid_bypass)
		return false;

	scmd = io_request->scmd;
	if ((scmd->result & 0xff) == SAM_STAT_GOOD)
		return false;
	if (host_byte(scmd->result) == DID_NO_CONNECT)
		return false;

	device = scmd->device->hostdata;
	if (pqi_device_offline(device) || pqi_device_in_remove(device))
		return false;

	ctrl_info = shost_to_hba(scmd->device->host);
	if (pqi_ctrl_offline(ctrl_info))
		return false;

	return true;
}

static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
		set_host_byte(scmd, DID_IMM_RETRY);
		scmd->SCp.this_residual++;
	}

	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}

static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
}

static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;
	io_request->raid_bypass = raid_bypass;

	request = io_request->iu;
	memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
	put_unaligned_le32(aio_handle, &request->nexus_id);
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	if (cdb_length > sizeof(request->cdb))
		cdb_length = sizeof(request->cdb);
	request->cdb_length = cdb_length;
	memcpy(request->cdb, cdb, cdb_length);

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}

	if (encryption_info) {
		request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}

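/*
 * Note on the direction mapping above (an interpretation of this driver's
 * usage, not a statement from the SOP specification): the SOP flags appear
 * to be named from the controller's point of view, so a host write
 * (DMA_TO_DEVICE) is submitted with SOP_READ_FLAG because the controller
 * reads the data buffer from host memory, and a host read is submitted
 * with SOP_WRITE_FLAG.
 */
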
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_r1_path_request *r1_request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;
	io_request->raid_bypass = true;

	r1_request = io_request->iu;
	memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));

	r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
	r1_request->num_drives = rmd->num_it_nexus_entries;
	put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
	put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
	if (rmd->num_it_nexus_entries == 3)
		put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);

	put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
	r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &r1_request->request_id);
	r1_request->error_index = r1_request->request_id;
	if (rmd->cdb_length > sizeof(r1_request->cdb))
		rmd->cdb_length = sizeof(r1_request->cdb);
	r1_request->cdb_length = rmd->cdb_length;
	memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);

	/* The direction is always write. */
	r1_request->data_direction = SOP_READ_FLAG;

	if (encryption_info) {
		r1_request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&r1_request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&r1_request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&r1_request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}

static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_r56_path_request *r56_request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;
	io_request->raid_bypass = true;

	r56_request = io_request->iu;
	memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));

	if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
	else
		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;

	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
	put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
	put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
	if (rmd->raid_level == SA_RAID_6) {
		put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
		r56_request->xor_multiplier = rmd->xor_mult;
	}
	put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
	r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le64(rmd->row, &r56_request->row);

	put_unaligned_le16(io_request->index, &r56_request->request_id);
	r56_request->error_index = r56_request->request_id;

	if (rmd->cdb_length > sizeof(r56_request->cdb))
		rmd->cdb_length = sizeof(r56_request->cdb);
	r56_request->cdb_length = rmd->cdb_length;
	memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);

	/* The direction is always write. */
	r56_request->data_direction = SOP_READ_FLAG;

	if (encryption_info) {
		r56_request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&r56_request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&r56_request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&r56_request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}

static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
	u16 hw_queue;

	hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
	if (hw_queue > ctrl_info->max_hw_queue_index)
		hw_queue = 0;

	return hw_queue;
}

static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
{
	if (blk_rq_is_passthrough(scmd->request))
		return false;

	return scmd->SCp.this_residual == 0;
}

/*
 * This function gets called just before we hand the completed SCSI request
 * back to the SML.
 */
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
	struct pqi_scsi_dev *device;

	if (!scmd->device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		return;
	}

	device = scmd->device->hostdata;
	if (!device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		return;
	}

	atomic_dec(&device->scsi_cmds_outstanding);
}

static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
	u32 oldest_jiffies;
	u8 lru_index;
	int i;
	int rc;
	struct pqi_scsi_dev *device;
	struct pqi_stream_data *pqi_stream_data;
	struct pqi_scsi_dev_raid_map_data rmd;

	if (!ctrl_info->enable_stream_detection)
		return false;

	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
	if (rc)
		return false;

	/* Check writes only. */
	if (!rmd.is_write)
		return false;

	device = scmd->device->hostdata;

	/* Check for RAID 5/6 streams. */
	if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
		return false;

	/*
	 * If controller does not support AIO RAID{5,6} writes, need to send
	 * requests down non-AIO path.
	 */
	if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
		(device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
		return true;

	lru_index = 0;
	oldest_jiffies = INT_MAX;
	for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
		pqi_stream_data = &device->stream_data[i];
		/*
		 * Check for adjacent request or request is within
		 * the previous request.
		 */
		if ((pqi_stream_data->next_lba &&
			rmd.first_block >= pqi_stream_data->next_lba) &&
			rmd.first_block <= pqi_stream_data->next_lba +
				rmd.block_cnt) {
			pqi_stream_data->next_lba = rmd.first_block +
				rmd.block_cnt;
			pqi_stream_data->last_accessed = jiffies;
			return true;
		}

		/* Unused entry. */
		if (pqi_stream_data->last_accessed == 0) {
			lru_index = i;
			break;
		}

		/* Find entry with oldest last accessed time. */
		if (pqi_stream_data->last_accessed <= oldest_jiffies) {
			oldest_jiffies = pqi_stream_data->last_accessed;
			lru_index = i;
		}
	}

	/* Set LRU entry. */
	pqi_stream_data = &device->stream_data[lru_index];
	pqi_stream_data->last_accessed = jiffies;
	pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;

	return false;
}

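/*
 * Stream-detection example (hypothetical LBAs): if a previous write left an
 * entry with next_lba == 1000 and a new write arrives with
 * first_block == 1000 and block_cnt == 8, it falls inside
 * [next_lba, next_lba + block_cnt], the entry advances to next_lba == 1008,
 * and the request is treated as part of a sequential parity write stream,
 * so the caller routes it down the RAID path instead of the AIO bypass.
 */
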
static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hw_queue;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;

	device = scmd->device->hostdata;

	if (!device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	atomic_inc(&device->scsi_cmds_outstanding);

	ctrl_info = shost_to_hba(shost);

	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	if (pqi_ctrl_blocked(ctrl_info)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	scmd->result = 0;

	hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
	queue_group = &ctrl_info->queue_groups[hw_queue];

	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		if (device->raid_bypass_enabled &&
			pqi_is_bypass_eligible_request(scmd) &&
			!pqi_is_parity_write_stream(ctrl_info, scmd)) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
				raid_bypassed = true;
				atomic_inc(&device->raid_bypass_cnt);
			}
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
	} else {
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
	}

out:
	if (rc)
		atomic_dec(&device->scsi_cmds_outstanding);

	return rc;
}

static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	unsigned int path;
	unsigned long flags;
	bool list_is_empty;

	for (path = 0; path < 2; path++) {
		while (1) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);
			list_is_empty =
				list_empty(&queue_group->request_list[path]);
			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
			if (list_is_empty)
				break;
			pqi_check_ctrl_health(ctrl_info);
			if (pqi_ctrl_offline(ctrl_info))
				return -ENXIO;
			usleep_range(1000, 2000);
		}
	}

	return 0;
}

static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
		if (rc)
			return rc;

		for (path = 0; path < 2; path++) {
			iq_pi = queue_group->iq_pi_copy[path];

			while (1) {
				iq_ci = readl(queue_group->iq_ci[path]);
				if (iq_ci == iq_pi)
					break;
				pqi_check_ctrl_health(ctrl_info);
				if (pqi_ctrl_offline(ctrl_info))
					return -ENXIO;
				usleep_range(1000, 2000);
			}
		}
	}

	return 0;
}

static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *scsi_device;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {

				scmd = io_request->scmd;
				if (!scmd)
					continue;

				scsi_device = scmd->device->hostdata;
				if (scsi_device != device)
					continue;

				list_del(&io_request->request_list_entry);
				set_host_byte(scmd, DID_RESET);
				pqi_free_io_request(io_request);
				scsi_dma_unmap(scmd);
				pqi_scsi_done(scmd);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}

#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS	10

static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_msecs)
{
	int cmds_outstanding;
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	unsigned long msecs_waiting;

	start_jiffies = jiffies;
	warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;

	while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
		if (msecs_waiting > timeout_msecs) {
			dev_err(&ctrl_info->pci_dev->dev,
				"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
				ctrl_info->scsi_host->host_no, device->bus, device->target,
				device->lun, msecs_waiting / 1000, cmds_outstanding);
			return -ETIMEDOUT;
		}
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
				ctrl_info->scsi_host->host_no, device->bus, device->target,
				device->lun, msecs_waiting / 1000, cmds_outstanding);
			warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	return 0;
}

static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}

#define PQI_LUN_RESET_POLL_COMPLETION_SECS	10

static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct completion *wait)
{
	int rc;
	unsigned int wait_secs;

	wait_secs = 0;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}

		wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;

		dev_warn(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n",
			ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun,
			wait_secs);
	}

	return rc;
}

#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS	30

static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct pqi_task_management_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_lun_reset_complete;
	io_request->context = &wait;

	request = io_request->iu;
	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(io_request->index, &request->request_id);
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));
	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
	if (ctrl_info->tmf_iu_timeout_supported)
		put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);

	pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);

	return rc;
}

#define PQI_LUN_RESET_RETRIES				3
#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS		(10 * 1000)
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS		(10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS	(2 * 60 * 1000)

static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int reset_rc;
	int wait_rc;
	unsigned int retries;
	unsigned long timeout_msecs;

	for (retries = 0;;) {
		reset_rc = pqi_lun_reset(ctrl_info, device);
		if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
			break;
		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
	}

	timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
		PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;

	wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
	if (wait_rc && reset_rc == 0)
		reset_rc = wait_rc;

	return reset_rc == 0 ? SUCCESS : FAILED;
}

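/*
 * Timeline sketch for a worst-case reset, derived from the constants above:
 * one initial attempt plus up to PQI_LUN_RESET_RETRIES retries spaced
 * 10 seconds apart, followed by up to 10 minutes (2 minutes if the reset
 * itself failed) waiting for outstanding commands to drain.
 */
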
static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_io_queued_for_device(ctrl_info, device);
	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
	if (rc)
		rc = FAILED;
	else
		rc = pqi_lun_reset_with_retries(ctrl_info, device);
	pqi_ctrl_unblock_requests(ctrl_info);

	return rc;
}

static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	int rc;
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	shost = scmd->device->host;
	ctrl_info = shost_to_hba(shost);
	device = scmd->device->hostdata;

	mutex_lock(&ctrl_info->lun_reset_mutex);

	dev_err(&ctrl_info->pci_dev->dev,
		"resetting scsi %d:%d:%d:%d\n",
		shost->host_no, device->bus, device->target, device->lun);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		rc = FAILED;
	else
		rc = pqi_device_reset(ctrl_info, device);

	dev_err(&ctrl_info->pci_dev->dev,
		"reset of scsi %d:%d:%d:%d: %s\n",
		shost->host_no, device->bus, device->target, device->lun,
		rc == SUCCESS ? "SUCCESS" : "FAILED");

	mutex_unlock(&ctrl_info->lun_reset_mutex);

	return rc;
}

static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
	unsigned long flags;
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_target *starget;
	struct sas_rphy *rphy;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			device->target = sdev_id(sdev);
			device->lun = sdev->lun;
			device->target_lun_valid = true;
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device) {
		sdev->hostdata = device;
		device->sdev = sdev;
		if (device->queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(sdev,
				device->advertised_queue_depth);
		}
		if (pqi_is_logical_device(device)) {
			pqi_disable_write_same(sdev);
		} else {
			sdev->allow_restart = 1;
			if (device->device_type == SA_DEVICE_TYPE_NVME)
				pqi_disable_write_same(sdev);
		}
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}

static int pqi_map_queues(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
		ctrl_info->pci_dev, 0);
}

static int pqi_slave_configure(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	device->devtype = sdev->type;

	return 0;
}

static void pqi_slave_destroy(struct scsi_device *sdev)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (device) {
		sdev->hostdata = NULL;
		if (!list_empty(&device->scsi_device_list_entry))
			list_del(&device->scsi_device_list_entry);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (device) {
		pqi_dev_info(ctrl_info, "removed", device);
		pqi_free_device(device);
	}
}

static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	struct pci_dev *pci_dev;
	u32 subsystem_vendor;
	u32 subsystem_device;
	cciss_pci_info_struct pciinfo;

	if (!arg)
		return -EINVAL;

	pci_dev = ctrl_info->pci_dev;

	pciinfo.domain = pci_domain_nr(pci_dev->bus);
	pciinfo.bus = pci_dev->bus->number;
	pciinfo.dev_fn = pci_dev->devfn;
	subsystem_vendor = pci_dev->subsystem_vendor;
	subsystem_device = pci_dev->subsystem_device;
	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;

	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;

	return 0;
}

static int pqi_getdrivver_ioctl(void __user *arg)
{
	u32 version;

	if (!arg)
		return -EINVAL;

	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
		(DRIVER_RELEASE << 16) | DRIVER_REVISION;

	if (copy_to_user(arg, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

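/*
 * Example of the packing schemes above: board_id places the PCI subsystem
 * device ID in the upper 16 bits and the subsystem vendor ID in the lower
 * 16 bits; for a hypothetical driver version 2.1.8-045, the version word
 * works out to (2 << 28) | (1 << 24) | (8 << 16) | 45 == 0x2108002d.
 */
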
struct ciss_error_info {
	u8 scsi_status;
	int command_status;
	size_t sense_data_length;
};

static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{
	int ciss_cmd_status;
	size_t sense_data_length;

	switch (pqi_error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
		break;
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
		break;
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
		break;
	default:
		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
		break;
	}

	sense_data_length =
		get_unaligned_le16(&pqi_error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&pqi_error_info->response_data_length);
	if (sense_data_length)
		if (sense_data_length > sizeof(pqi_error_info->data))
			sense_data_length = sizeof(pqi_error_info->data);

	ciss_error_info->scsi_status = pqi_error_info->status;
	ciss_error_info->command_status = ciss_cmd_status;
	ciss_error_info->sense_data_length = sense_data_length;
}

static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
	IOCTL_Command_struct iocommand;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info pqi_error_info;
	struct ciss_error_info ciss_error_info;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
		return -EBUSY;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
	case XFER_READ | XFER_WRITE:
		break;
	default:
		return -EINVAL;
	}

	if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	case XFER_READ | XFER_WRITE:
		request.data_direction = SOP_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;

	if (iocommand.buf_size > 0) {
		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}

	put_unaligned_le16(iu_length, &request.header.iu_length);

	if (ctrl_info->raid_iu_timeout_supported)
		put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);

	if (iocommand.buf_size > 0)
		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
			DMA_BIDIRECTIONAL);

	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));

	if (rc == 0) {
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
		iocommand.error_info.CommandStatus =
			ciss_error_info.command_status;
		sense_data_length = ciss_error_info.sense_data_length;
		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}

	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}

	if (rc == 0 && iocommand.buf_size > 0 &&
		(iocommand.Request.Type.Direction & XFER_READ)) {
		if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
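
/*
 * Dispatch the CCISS compatibility ioctls: the DEREGDISK/REGNEWDISK/REGNEWD
 * variants simply trigger a rescan, while the remaining commands are handed
 * off to the pqi_get*_ioctl()/pqi_passthru_ioctl() helpers above.
 */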
static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
	void __user *arg)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		rc = pqi_scan_scsi_devices(ctrl_info);
		break;
	case CCISS_GETPCIINFO:
		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
		break;
	case CCISS_GETDRIVVER:
		rc = pqi_getdrivver_ioctl(arg);
		break;
	case CCISS_PASSTHRU:
		rc = pqi_passthru_ioctl(ctrl_info, arg);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
static ssize_t pqi_firmware_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
}

static ssize_t pqi_driver_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
}

static ssize_t pqi_serial_number_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
}

static ssize_t pqi_model_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
}

static ssize_t pqi_vendor_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
}
static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	pqi_scan_start(shost);

	return count;
}
static ssize_t pqi_lockup_action_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	int count = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (pqi_lockup_actions[i].action == pqi_lockup_action)
			count += scnprintf(buffer + count, PAGE_SIZE - count,
				"[%s] ", pqi_lockup_actions[i].name);
		else
			count += scnprintf(buffer + count, PAGE_SIZE - count,
				"%s ", pqi_lockup_actions[i].name);
	}

	count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");

	return count;
}
static ssize_t pqi_lockup_action_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	unsigned int i;
	char *action_name;
	char action_name_buffer[32];

	strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
	action_name = strstrip(action_name_buffer);

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return count;
		}
	}

	return -EINVAL;
}
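
/*
 * Example usage of the lockup_action attribute defined above (the host
 * number is illustrative):
 *
 *	cat /sys/class/scsi_host/host2/lockup_action
 *	echo panic > /sys/class/scsi_host/host2/lockup_action
 *
 * Valid values are the entries of pqi_lockup_actions[]: none, reboot
 * and panic.
 */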
static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, 10, "%x\n",
			ctrl_info->enable_stream_detection);
}

static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
	u8 set_stream_detection = 0;

	if (kstrtou8(buffer, 0, &set_stream_detection))
		return -EINVAL;

	if (set_stream_detection > 0)
		set_stream_detection = 1;

	ctrl_info->enable_stream_detection = set_stream_detection;

	return count;
}
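
/*
 * The next two attribute pairs follow the same pattern as
 * enable_stream_detection above: any non-zero value written via sysfs is
 * normalized to 1, e.g. (host number illustrative):
 *
 *	echo 1 > /sys/class/scsi_host/host2/enable_r5_writes
 */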
static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
}

static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
	u8 set_r5_writes = 0;

	if (kstrtou8(buffer, 0, &set_r5_writes))
		return -EINVAL;

	if (set_r5_writes > 0)
		set_r5_writes = 1;

	ctrl_info->enable_r5_writes = set_r5_writes;

	return count;
}
static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
}

static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
	u8 set_r6_writes = 0;

	if (kstrtou8(buffer, 0, &set_r6_writes))
		return -EINVAL;

	if (set_r6_writes > 0)
		set_r6_writes = 1;

	ctrl_info->enable_r6_writes = set_r6_writes;

	return count;
}
static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
	pqi_lockup_action_store);
static DEVICE_ATTR(enable_stream_detection, 0644,
	pqi_host_enable_stream_detection_show,
	pqi_host_enable_stream_detection_store);
static DEVICE_ATTR(enable_r5_writes, 0644,
	pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
static DEVICE_ATTR(enable_r6_writes, 0644,
	pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
static struct device_attribute *pqi_shost_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_firmware_version,
	&dev_attr_model,
	&dev_attr_serial_number,
	&dev_attr_vendor,
	&dev_attr_rescan,
	&dev_attr_lockup_action,
	&dev_attr_enable_stream_detection,
	&dev_attr_enable_r5_writes,
	&dev_attr_enable_r6_writes,
	NULL
};
static ssize_t pqi_unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u8 unique_id[16];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	if (device->is_physical_device) {
		memset(unique_id, 0, 8);
		memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
	} else {
		memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE,
		"%02X%02X%02X%02X%02X%02X%02X%02X"
		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
		unique_id[0], unique_id[1], unique_id[2], unique_id[3],
		unique_id[4], unique_id[5], unique_id[6], unique_id[7],
		unique_id[8], unique_id[9], unique_id[10], unique_id[11],
		unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u8 lunid[8];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	memcpy(lunid, device->scsi3addr, sizeof(lunid));

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
#define MAX_PATHS	8

static ssize_t pqi_path_info_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index;
	char *active;
	u8 phys_connector[2];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	bay = device->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1 << i;
		if (i == device->active_path_index)
			active = "Active";
		else if (device->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"[%d:%d:%d:%d] %20.20s ",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun,
					scsi_device_type(device->devtype));

		if (device->devtype == TYPE_RAID ||
			pqi_is_logical_device(device))
			goto end_buffer;

		memcpy(&phys_connector, &device->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';

		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"PORT: %.2s ", phys_connector);

		box = device->box[i];
		if (box != 0 && box != 0xFF)
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"BOX: %hhu ", box);

		if ((device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC) &&
			pqi_expose_device(device))
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"BAY: %hhu ", bay);

end_buffer:
		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"%s\n", active);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return output_len;
}
static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device || !pqi_is_device_with_sas_address(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}
static ssize_t pqi_raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	char *raid_level;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	if (pqi_is_logical_device(device))
		raid_level = pqi_raid_level_to_string(device->raid_level);
	else
		raid_level = "N/A";

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}
static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	int raid_bypass_cnt;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		return -ENODEV;
	}

	raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
}
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static struct device_attribute *pqi_sdev_attrs[] = {
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	&dev_attr_ssd_smart_path_enabled,
	&dev_attr_raid_level,
	&dev_attr_raid_bypass_cnt,
	NULL
};
static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.slave_configure = pqi_slave_configure,
	.slave_destroy = pqi_slave_destroy,
	.map_queues = pqi_map_queues,
	.sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};
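
/*
 * Allocate a SCSI host for the controller, publish the limits discovered
 * during PQI initialization (queue depth, scatter/gather table size,
 * hardware queue count), and register it with the SCSI midlayer and the
 * SAS transport layer.
 */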
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	shost->io_port = 0;
	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	shost->host_tagset = 1;
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
		goto remove_host;
	}

	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);
free_host:
	scsi_host_put(shost);

	return rc;
}
static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	pqi_delete_sas_host(ctrl_info);

	shost = ctrl_info->scsi_host;
	if (!shost)
		return;

	scsi_remove_host(shost);
	scsi_host_put(shost);
}
static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	unsigned int timeout_msecs;
	union pqi_reset_register reset_reg;

	pqi_registers = ctrl_info->pqi_registers;
	timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;

	while (1) {
		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
		reset_reg.all_bits = readl(&pqi_registers->device_reset);
		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
			break;
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
		if (time_after(jiffies, timeout)) {
			rc = -ETIMEDOUT;
			break;
		}
	}

	return rc;
}
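
/*
 * Issue a hard PQI reset through the device_reset register, optionally
 * preceded by a SIS quiesce handshake when the firmware supports it, then
 * poll for completion via pqi_wait_for_pqi_reset_completion() above.
 */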
static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	union pqi_reset_register reset_reg;

	if (ctrl_info->pqi_reset_quiesce_supported) {
		rc = sis_pqi_reset_quiesce(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"PQI reset failed during quiesce with error %d\n", rc);
			return rc;
		}
	}

	reset_reg.all_bits = 0;
	reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
	reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

	writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);

	rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"PQI reset failed with error %d\n", rc);

	return rc;
}
static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_sense_subsystem_info *sense_info;

	sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
	if (!sense_info)
		return -ENOMEM;

	rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
	if (rc)
		goto out;

	memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
		sizeof(sense_info->ctrl_serial_number));
	ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';

out:
	kfree(sense_info);

	return rc;
}
static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_identify_controller *identify;

	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
	if (!identify)
		return -ENOMEM;

	rc = pqi_identify_controller(ctrl_info, identify);
	if (rc)
		goto out;

	if (get_unaligned_le32(&identify->extra_controller_flags) &
		BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
		memcpy(ctrl_info->firmware_version,
			identify->firmware_version_long,
			sizeof(identify->firmware_version_long));
	} else {
		memcpy(ctrl_info->firmware_version,
			identify->firmware_version_short,
			sizeof(identify->firmware_version_short));
		ctrl_info->firmware_version
			[sizeof(identify->firmware_version_short)] = '\0';
		snprintf(ctrl_info->firmware_version +
			strlen(ctrl_info->firmware_version),
			sizeof(ctrl_info->firmware_version) -
			sizeof(identify->firmware_version_short),
			"-%u",
			get_unaligned_le16(&identify->firmware_build_number));
	}

	memcpy(ctrl_info->model, identify->product_id,
		sizeof(identify->product_id));
	ctrl_info->model[sizeof(identify->product_id)] = '\0';

	memcpy(ctrl_info->vendor, identify->vendor_id,
		sizeof(identify->vendor_id));
	ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';

out:
	kfree(identify);

	return rc;
}
struct pqi_config_table_section_info {
	struct pqi_ctrl_info *ctrl_info;
	void		*section;
	u32		section_offset;
	void __iomem	*section_iomem_addr;
};
static inline bool pqi_is_firmware_feature_supported(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{
	unsigned int byte_index;

	byte_index = bit_position / BITS_PER_BYTE;

	if (byte_index >= le16_to_cpu(firmware_features->num_elements))
		return false;

	return firmware_features->features_supported[byte_index] &
		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}

static inline bool pqi_is_firmware_feature_enabled(
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *firmware_features_iomem_addr,
	unsigned int bit_position)
{
	unsigned int byte_index;
	u8 __iomem *features_enabled_iomem_addr;

	byte_index = (bit_position / BITS_PER_BYTE) +
		(le16_to_cpu(firmware_features->num_elements) * 2);

	features_enabled_iomem_addr = firmware_features_iomem_addr +
		offsetof(struct pqi_config_table_firmware_features,
			features_supported) + byte_index;

	return *((__force u8 *)features_enabled_iomem_addr) &
		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}

static inline void pqi_request_firmware_feature(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{
	unsigned int byte_index;

	byte_index = (bit_position / BITS_PER_BYTE) +
		le16_to_cpu(firmware_features->num_elements);

	firmware_features->features_supported[byte_index] |=
		(1 << (bit_position % BITS_PER_BYTE));
}
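
/*
 * The firmware features section of the PQI configuration table consists of
 * three consecutive byte arrays of num_elements bytes each: the features
 * the firmware supports, the features the host is requesting, and the
 * features the firmware actually enabled.  The helpers above index into
 * those arrays; the functions below write the "requested" bits back to the
 * controller and ask it to re-evaluate the section.
 */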
static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
	u16 first_section, u16 last_section)
{
	struct pqi_vendor_general_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
		&request.function_code);
	put_unaligned_le16(first_section,
		&request.data.config_table_update.first_section);
	put_unaligned_le16(last_section,
		&request.data.config_table_update.last_section);

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *firmware_features_iomem_addr)
{
	void *features_requested;
	void __iomem *features_requested_iomem_addr;
	void __iomem *host_max_known_feature_iomem_addr;

	features_requested = firmware_features->features_supported +
		le16_to_cpu(firmware_features->num_elements);

	features_requested_iomem_addr = firmware_features_iomem_addr +
		(features_requested - (void *)firmware_features);

	memcpy_toio(features_requested_iomem_addr, features_requested,
		le16_to_cpu(firmware_features->num_elements));

	if (pqi_is_firmware_feature_supported(firmware_features,
		PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
		host_max_known_feature_iomem_addr =
			features_requested_iomem_addr +
			(le16_to_cpu(firmware_features->num_elements) * 2) +
			sizeof(__le16);
		writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
			host_max_known_feature_iomem_addr);
	}

	return pqi_config_table_update(ctrl_info,
		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
}
struct pqi_firmware_feature {
	char		*feature_name;
	unsigned int	feature_bit;
	bool		supported;
	bool		enabled;
	void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
		struct pqi_firmware_feature *firmware_feature);
};
static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	if (!firmware_feature->supported) {
		dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
			firmware_feature->feature_name);
		return;
	}

	if (firmware_feature->enabled) {
		dev_info(&ctrl_info->pci_dev->dev,
			"%s enabled\n", firmware_feature->feature_name);
		return;
	}

	dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
		firmware_feature->feature_name);
}
static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	switch (firmware_feature->feature_bit) {
	case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
		ctrl_info->enable_r1_writes = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
		ctrl_info->enable_r5_writes = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
		ctrl_info->enable_r6_writes = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
		ctrl_info->soft_reset_handshake_supported =
			firmware_feature->enabled &&
			pqi_read_soft_reset_status(ctrl_info);
		break;
	case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
		ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
		ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
		ctrl_info->unique_wwid_in_report_phys_lun_supported =
			firmware_feature->enabled;
		break;
	default:
		break;
	}

	pqi_firmware_feature_status(ctrl_info, firmware_feature);
}
static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	if (firmware_feature->feature_status)
		firmware_feature->feature_status(ctrl_info, firmware_feature);
}
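
/*
 * Table of every firmware feature this driver knows about.  Features that
 * only need to be reported use pqi_firmware_feature_status(); features that
 * change driver behavior hook pqi_ctrl_update_feature_flags() so the
 * corresponding ctrl_info flags track what the firmware enabled.
 */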
static DEFINE_MUTEX(pqi_firmware_features_mutex);

static struct pqi_firmware_feature pqi_firmware_features[] = {
	{
		.feature_name = "Online Firmware Activation",
		.feature_bit = PQI_FIRMWARE_FEATURE_OFA,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "Serial Management Protocol",
		.feature_bit = PQI_FIRMWARE_FEATURE_SMP,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "Maximum Known Feature",
		.feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 0 Read Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 1 Read Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 5 Read Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 6 Read Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 0 Write Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "RAID 1 Write Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID 5 Write Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID 6 Write Bypass",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "New Soft Reset Handshake",
		.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID IU Timeout",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "TMF IU Timeout",
		.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "Unique WWID in Report Physical LUN",
		.feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
};
static void pqi_process_firmware_features(
	struct pqi_config_table_section_info *section_info)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_config_table_firmware_features *firmware_features;
	void __iomem *firmware_features_iomem_addr;
	unsigned int i;
	unsigned int num_features_supported;

	ctrl_info = section_info->ctrl_info;
	firmware_features = section_info->section;
	firmware_features_iomem_addr = section_info->section_iomem_addr;

	for (i = 0, num_features_supported = 0;
		i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (pqi_is_firmware_feature_supported(firmware_features,
			pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].supported = true;
			num_features_supported++;
		} else {
			pqi_firmware_feature_update(ctrl_info,
				&pqi_firmware_features[i]);
		}
	}

	if (num_features_supported == 0)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (!pqi_firmware_features[i].supported)
			continue;
		pqi_request_firmware_feature(firmware_features,
			pqi_firmware_features[i].feature_bit);
	}

	rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
		firmware_features_iomem_addr);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable firmware features in PQI configuration table\n");
		for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
			if (!pqi_firmware_features[i].supported)
				continue;
			pqi_firmware_feature_update(ctrl_info,
				&pqi_firmware_features[i]);
		}
		return;
	}

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (!pqi_firmware_features[i].supported)
			continue;
		if (pqi_is_firmware_feature_enabled(firmware_features,
			firmware_features_iomem_addr,
			pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].enabled = true;
		}
		pqi_firmware_feature_update(ctrl_info,
			&pqi_firmware_features[i]);
	}
}
static void pqi_init_firmware_features(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		pqi_firmware_features[i].supported = false;
		pqi_firmware_features[i].enabled = false;
	}
}

static void pqi_process_firmware_features_section(
	struct pqi_config_table_section_info *section_info)
{
	mutex_lock(&pqi_firmware_features_mutex);
	pqi_init_firmware_features();
	pqi_process_firmware_features(section_info);
	mutex_unlock(&pqi_firmware_features_mutex);
}
/*
 * Reset all controller settings that can be initialized during the processing
 * of the PQI Configuration Table.
 */

static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->heartbeat_counter = NULL;
	ctrl_info->soft_reset_status = NULL;
	ctrl_info->soft_reset_handshake_supported = false;
	ctrl_info->enable_r1_writes = false;
	ctrl_info->enable_r5_writes = false;
	ctrl_info->enable_r6_writes = false;
	ctrl_info->raid_iu_timeout_supported = false;
	ctrl_info->tmf_iu_timeout_supported = false;
	ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
	u32 table_length;
	u32 section_offset;
	bool firmware_feature_section_present;
	void __iomem *table_iomem_addr;
	struct pqi_config_table *config_table;
	struct pqi_config_table_section_header *section;
	struct pqi_config_table_section_info section_info;
	struct pqi_config_table_section_info feature_section_info;

	table_length = ctrl_info->config_table_length;
	if (table_length == 0)
		return 0;

	config_table = kmalloc(table_length, GFP_KERNEL);
	if (!config_table) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate memory for PQI configuration table\n");
		return -ENOMEM;
	}

	/*
	 * Copy the config table contents from I/O memory space into the
	 * temporary buffer.
	 */
	table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
	memcpy_fromio(config_table, table_iomem_addr, table_length);

	firmware_feature_section_present = false;
	section_info.ctrl_info = ctrl_info;
	section_offset = get_unaligned_le32(&config_table->first_section_offset);

	while (section_offset) {
		section = (void *)config_table + section_offset;

		section_info.section = section;
		section_info.section_offset = section_offset;
		section_info.section_iomem_addr = table_iomem_addr + section_offset;

		switch (get_unaligned_le16(&section->section_id)) {
		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
			firmware_feature_section_present = true;
			feature_section_info = section_info;
			break;
		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
			if (pqi_disable_heartbeat)
				dev_warn(&ctrl_info->pci_dev->dev,
					"heartbeat disabled by module parameter\n");
			else
				ctrl_info->heartbeat_counter =
					table_iomem_addr +
					section_offset +
					offsetof(struct pqi_config_table_heartbeat,
						heartbeat_counter);
			break;
		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
			ctrl_info->soft_reset_status =
				table_iomem_addr +
				section_offset +
				offsetof(struct pqi_config_table_soft_reset,
					soft_reset_status);
			break;
		}

		section_offset = get_unaligned_le16(&section->next_section_offset);
	}

	/*
	 * We process the firmware feature section after all other sections
	 * have been processed so that the feature bit callbacks can take
	 * into account the settings configured by other sections.
	 */
	if (firmware_feature_section_present)
		pqi_process_firmware_features_section(&feature_section_info);

	kfree(config_table);

	return 0;
}
/* Switches the controller from PQI mode back into SIS mode. */

static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
	rc = pqi_reset(ctrl_info);
	if (rc)
		return rc;
	rc = sis_reenable_sis_mode(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"re-enabling SIS mode failed with error %d\n", rc);
		return rc;
	}
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);

	return 0;
}

/*
 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */

static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
		return 0;

	if (sis_is_kernel_up(ctrl_info)) {
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
		return 0;
	}

	return pqi_revert_to_sis_mode(ctrl_info);
}
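
/*
 * One-time controller bring-up: force SIS mode, size the I/O resources from
 * the reported capabilities, transition the controller into PQI mode, create
 * the admin and operational queues, negotiate firmware features via the PQI
 * configuration table, and finally register with the SCSI midlayer and kick
 * off the first device scan.
 */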
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u32 product_id;

	if (reset_devices) {
		sis_soft_reset(ctrl_info);
		msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
	} else {
		rc = pqi_force_sis_mode(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	product_id = sis_get_product_id(ctrl_info);
	ctrl_info->product_id = (u8)product_id;
	ctrl_info->product_revision = (u8)(product_id >> 8);

	if (reset_devices) {
		if (ctrl_info->max_outstanding_requests >
			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
			ctrl_info->max_outstanding_requests =
				PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
	} else {
		if (ctrl_info->max_outstanding_requests >
			PQI_MAX_OUTSTANDING_REQUESTS)
			ctrl_info->max_outstanding_requests =
				PQI_MAX_OUTSTANDING_REQUESTS;
	}

	pqi_calculate_io_resources(ctrl_info);

	rc = pqi_alloc_error_buffer(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate PQI error buffer\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	rc = pqi_alloc_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate admin queues\n");
		return rc;
	}

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_report_device_capability(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"obtaining device capability failed\n");
		return rc;
	}

	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;

	pqi_calculate_queue_resources(ctrl_info);

	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;

	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
		ctrl_info->max_msix_vectors =
			ctrl_info->num_msix_vectors_enabled;
		pqi_calculate_queue_resources(ctrl_info);
	}

	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_alloc_operational_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate operational queues\n");
		return rc;
	}

	pqi_init_operational_queues(ctrl_info);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
		if (rc) { /* Supported features not returned correctly. */
			dev_err(&ctrl_info->pci_dev->dev,
				"error obtaining advanced RAID bypass configuration\n");
			return rc;
		}
		ctrl_info->ciss_report_log_flags |=
			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
	}

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	/* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_get_ctrl_serial_number(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining ctrl serial number\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_admin_queues *admin_queues;
	struct pqi_event_queue *event_queue;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues->iq_pi_copy = 0;
	admin_queues->oq_ci_copy = 0;
	writel(0, admin_queues->oq_pi);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
		ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
		ctrl_info->queue_groups[i].oq_ci_copy = 0;

		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
		writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
		writel(0, ctrl_info->queue_groups[i].oq_pi);
	}

	event_queue = &ctrl_info->event_queue;
	writel(0, event_queue->oq_pi);
	event_queue->oq_ci_copy = 0;
}
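
/*
 * Re-initialization path used after suspend/resume and after an Online
 * Firmware Activation: the same SIS -> PQI transition as pqi_ctrl_init(),
 * but reusing the previously allocated queue memory via pqi_reinit_queues()
 * and skipping SCSI host registration.
 */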
static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	rc = pqi_force_sis_mode(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	pqi_reinit_queues(ctrl_info);

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;
	pqi_ctrl_unblock_requests(ctrl_info);

	pqi_ctrl_reset_config(ctrl_info);

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error obtaining advanced RAID bypass configuration\n");
			return rc;
		}
		ctrl_info->ciss_report_log_flags |=
			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
	}

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	if (pqi_ofa_in_progress(ctrl_info))
		pqi_ctrl_unblock_scan(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
{
	int rc;

	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);

	return pcibios_err_to_errno(rc);
}
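
/*
 * Low-level PCI setup: enable the function, set a 64-bit DMA mask when
 * dma_addr_t allows it, map BAR 0 for the controller registers, and bump
 * the PCIe completion timeout before enabling bus mastering.
 */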
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable PCI device\n");
		return rc;
	}

	if (sizeof(dma_addr_t) > 4)
		mask = DMA_BIT_MASK(64);
	else
		mask = DMA_BIT_MASK(32);

	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
		goto disable_device;
	}

	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}

	ctrl_info->iomem_base = ioremap(pci_resource_start(
		ctrl_info->pci_dev, 0),
		sizeof(struct pqi_ctrl_registers));
	if (!ctrl_info->iomem_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to map memory for controller registers\n");
		rc = -ENOMEM;
		goto release_regions;
	}

#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS		0x6

	/* Increase the PCIe completion timeout. */
	rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to set PCIe completion timeout\n");
		goto release_regions;
	}

	/* Enable bus mastering. */
	pci_set_master(ctrl_info->pci_dev);

	ctrl_info->registers = ctrl_info->iomem_base;
	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;

	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	if (pci_is_enabled(ctrl_info->pci_dev))
		pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
		GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);
	mutex_init(&ctrl_info->lun_reset_mutex);
	mutex_init(&ctrl_info->ofa_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);

	INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
	INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	init_waitqueue_head(&ctrl_info->block_requests_wait);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->irq_mode = IRQ_MODE_NONE;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
	ctrl_info->max_transfer_encrypted_sas_sata =
		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
	ctrl_info->max_transfer_encrypted_nvme =
		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
	ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
	ctrl_info->max_write_raid_1_10_2drive = ~0;
	ctrl_info->max_write_raid_1_10_3drive = ~0;

	return ctrl_info;
}
static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}

static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_irqs(ctrl_info);
	pqi_disable_msix_interrupts(ctrl_info);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
}

static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);
}
static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
{
	int i;
	u32 sg_count;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	dma_addr_t dma_handle;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	sg_count = DIV_ROUND_UP(total_size, chunk_size);
	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
		goto out;

	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
		goto out;

	dev = &ctrl_info->pci_dev->dev;

	for (i = 0; i < sg_count; i++) {
		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			goto out_free_chunks;
		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	return -ENOMEM;
}
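
/*
 * Allocate the host buffer the firmware asked for during OFA.  Start with a
 * single chunk covering the whole request and halve the chunk size until
 * the allocation succeeds or the chunks would drop below the minimum size
 * that still fits the request in PQI_OFA_MAX_SG_DESCRIPTORS descriptors.
 */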
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 chunk_size;
	u32 min_chunk_size;

	if (ctrl_info->ofa_bytes_requested == 0)
		return 0;

	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
	min_chunk_size = PAGE_ALIGN(min_chunk_size);

	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
			return 0;
		chunk_size /= 2;
		chunk_size = PAGE_ALIGN(chunk_size);
	}

	return -ENOMEM;
}
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	struct device *dev;
	struct pqi_ofa_memory *ofap;

	dev = &ctrl_info->pci_dev->dev;

	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
	if (!ofap)
		return;

	ctrl_info->pqi_ofa_mem_virt_addr = ofap;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev,
			"failed to allocate host buffer for Online Firmware Activation\n");
		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
		return;
	}

	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	unsigned int num_memory_descriptors;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
	if (!ofap)
		return;

	dev = &ctrl_info->pci_dev->dev;

	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
		goto out;

	mem_descriptor = ofap->sg_descriptor;
	num_memory_descriptors =
		get_unaligned_le16(&ofap->num_memory_descriptors);

	for (i = 0; i < num_memory_descriptors; i++) {
		dma_free_coherent(dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(dev, sizeof(*ofap), ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	u32 buffer_length;
	struct pqi_vendor_general_request request;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (ofap) {
		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(buffer_length,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{
	ssleep(delay_secs);

	return pqi_ctrl_init_resume(ctrl_info);
}
static void pqi_perform_lockup_action(void)
{
	switch (pqi_lockup_action) {
	case PANIC:
		panic("FATAL: Smart Family Controller lockup detected");
		break;
	case REBOOT:
		emergency_restart();
		break;
	case NONE:
	default:
		break;
	}
}
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.data_in_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}
static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microsemi Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node, cp_node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		cp_node = cpu_to_node(0);
		if (cp_node == NUMA_NO_NODE)
			cp_node = 0;
		set_dev_node(&pci_dev->dev, cp_node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}
static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/
	}
}
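
/*
 * .shutdown handler: wait out any OFA in progress, quiesce all I/O, flush
 * the controller's battery-backed write cache, and reset the controller so
 * it comes back up in a clean state on the next boot.
 */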
static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}

static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}
static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_stop_heartbeat_timer(ctrl_info);

	pqi_crash_if_pending_command(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

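/*
 * Resume hook.  If the device is not back in D0, fall back to a single
 * legacy INTx vector and just unblock activity; otherwise return the
 * device to D0, restore its PCI state, and rerun controller
 * initialization via pqi_ctrl_init_resume().
 */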
static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_ctrl_unblock_device_reset(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		pqi_scsi_unblock_requests(ctrl_info);
		pqi_ctrl_unblock_scan(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	return pqi_ctrl_init_resume(ctrl_info);
}

/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	/* OEM subsystem vendor/device ID entries omitted */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0808) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0809) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x080a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1400) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1402) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1410) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1411) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1412) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1420) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1430) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1440) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1441) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1450) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1452) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1462) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1470) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1471) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1472) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1480) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1490) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x1491) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14a0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14a1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14b0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14b1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14c0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14c1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14d0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14e0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADAPTEC2, 0x14f0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_ADVANTECH, 0x8312) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x1002) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_HP, 0x1101) },
	/* additional OEM subsystem vendor/device ID entries omitted */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_VENDOR_ID_GIGABYTE, 0x1000) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			 PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

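/*
 * Note: the PCI_ANY_ID catch-all entry above must remain the last real
 * entry in the table so that the specific subsystem IDs match first;
 * wildcard matches can additionally be refused at probe time via the
 * pqi_disable_device_id_wildcards module parameter.
 */
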
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};

static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);

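/*
 * verify_structures() is never called; it exists solely so that the
 * BUILD_BUG_ON() checks below are compiled.  They pin the offset and size
 * of every structure shared with the controller firmware, turning any
 * accidental layout change into a build failure rather than a runtime
 * protocol mismatch.
 */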
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		driver_flags) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}