/*
 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <linux/blk-mq-pci.h>
58 #include <asm/unaligned.h>
60 #include "mpt3sas_base.h"
62 #define RAID_CHANNEL 1
64 #define PCIE_CHANNEL 2
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
83 /* global parameters */
84 LIST_HEAD(mpt3sas_ioc_list);
85 /* global ioc lock for list operations */
86 DEFINE_SPINLOCK(gioc_lock);
88 MODULE_AUTHOR(MPT3SAS_AUTHOR);
89 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
90 MODULE_LICENSE("GPL");
91 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
92 MODULE_ALIAS("mpt2sas");
94 /* local parameters */
95 static u8 scsi_io_cb_idx = -1;
96 static u8 tm_cb_idx = -1;
97 static u8 ctl_cb_idx = -1;
98 static u8 base_cb_idx = -1;
99 static u8 port_enable_cb_idx = -1;
100 static u8 transport_cb_idx = -1;
101 static u8 scsih_cb_idx = -1;
102 static u8 config_cb_idx = -1;
106 static u8 tm_tr_cb_idx = -1 ;
107 static u8 tm_tr_volume_cb_idx = -1 ;
108 static u8 tm_sas_control_cb_idx = -1;
110 /* command line options */
111 static u32 logging_level;
112 MODULE_PARM_DESC(logging_level,
113 " bits for enabling additional logging info (default=0)");
116 static ushort max_sectors = 0xFFFF;
117 module_param(max_sectors, ushort, 0444);
118 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
121 static int missing_delay[2] = {-1, -1};
122 module_param_array(missing_delay, int, NULL, 0444);
123 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
125 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
126 #define MPT3SAS_MAX_LUN (16895)
127 static u64 max_lun = MPT3SAS_MAX_LUN;
128 module_param(max_lun, ullong, 0444);
129 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
131 static ushort hbas_to_enumerate;
132 module_param(hbas_to_enumerate, ushort, 0444);
133 MODULE_PARM_DESC(hbas_to_enumerate,
134 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
135 1 - enumerates only SAS 2.0 generation HBAs\n \
136 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
138 /* diag_buffer_enable is bitwise
140 * bit 1 set = SNAPSHOT
141 * bit 2 set = EXTENDED
143 * Either bit can be set, or both
145 static int diag_buffer_enable = -1;
146 module_param(diag_buffer_enable, int, 0444);
147 MODULE_PARM_DESC(diag_buffer_enable,
148 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
149 static int disable_discovery = -1;
150 module_param(disable_discovery, int, 0444);
151 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
154 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
155 static int prot_mask = -1;
156 module_param(prot_mask, int, 0444);
157 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
159 static bool enable_sdev_max_qd;
160 module_param(enable_sdev_max_qd, bool, 0444);
161 MODULE_PARM_DESC(enable_sdev_max_qd,
162 "Enable sdev max qd as can_queue, def=disabled(0)");
164 static int multipath_on_hba = -1;
165 module_param(multipath_on_hba, int, 0);
166 MODULE_PARM_DESC(multipath_on_hba,
167 "Multipath support to add same target device\n\t\t"
168 "as many times as it is visible to HBA from various paths\n\t\t"
170 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
171 "\t SAS 3.5 HBA - This will be enabled)");
173 static int host_tagset_enable = 1;
174 module_param(host_tagset_enable, int, 0444);
175 MODULE_PARM_DESC(host_tagset_enable,
176 "Shared host tagset enable/disable Default: enable(1)");
178 /* raid transport support */
179 static struct raid_template *mpt3sas_raid_template;
180 static struct raid_template *mpt2sas_raid_template;
184 * struct sense_info - common structure for obtaining sense keys
186 * @asc: additional sense code
187 * @ascq: additional sense code qualifier
195 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
196 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
197 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
198 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
199 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
201 * struct fw_event_work - firmware event struct
202 * @list: link list framework
203 * @work: work object (ioc->fault_reset_work_q)
204 * @ioc: per adapter object
205 * @device_handle: device handle
206 * @VF_ID: virtual function id
207 * @VP_ID: virtual port id
208 * @ignore: flag meaning this event has been marked to ignore
209 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
210 * @refcount: kref for this event
211 * @event_data: reply event data payload follows
213 * This object stored on ioc->fw_event_list.
215 struct fw_event_work {
216 struct list_head list;
217 struct work_struct work;
219 struct MPT3SAS_ADAPTER *ioc;
225 struct kref refcount;
226 char event_data[] __aligned(4);
229 static void fw_event_work_free(struct kref *r)
231 kfree(container_of(r, struct fw_event_work, refcount));
234 static void fw_event_work_get(struct fw_event_work *fw_work)
236 kref_get(&fw_work->refcount);
239 static void fw_event_work_put(struct fw_event_work *fw_work)
241 kref_put(&fw_work->refcount, fw_event_work_free);
244 static struct fw_event_work *alloc_fw_event_work(int len)
246 struct fw_event_work *fw_event;
248 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
252 kref_init(&fw_event->refcount);
257 * struct _scsi_io_transfer - scsi io transfer
258 * @handle: sas device handle (assigned by firmware)
259 * @is_raid: flag set for hidden raid components
260 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
261 * @data_length: data transfer length
262 * @data_dma: dma pointer to data
265 * @cdb_length: cdb length
267 * @timeout: timeout for this command
268 * @VF_ID: virtual function id
269 * @VP_ID: virtual port id
270 * @valid_reply: flag set for reply message
271 * @sense_length: sense length
272 * @ioc_status: ioc status
273 * @scsi_state: scsi state
274 * @scsi_status: scsi staus
275 * @log_info: log information
276 * @transfer_length: data length transfer when there is a reply message
278 * Used for sending internal scsi commands to devices within this module.
279 * Refer to _scsi_send_scsi_io().
281 struct _scsi_io_transfer {
284 enum dma_data_direction dir;
287 u8 sense[SCSI_SENSE_BUFFERSIZE];
295 /* the following bits are only valid when 'valid_reply = 1' */
305 * _scsih_set_debug_level - global setting of ioc->logging_level.
309 * Note: The logging levels are defined in mpt3sas_debug.h.
312 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
314 int ret = param_set_int(val, kp);
315 struct MPT3SAS_ADAPTER *ioc;
320 pr_info("setting logging_level(0x%08x)\n", logging_level);
321 spin_lock(&gioc_lock);
322 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
323 ioc->logging_level = logging_level;
324 spin_unlock(&gioc_lock);
327 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
328 &logging_level, 0644);
331 * _scsih_srch_boot_sas_address - search based on sas_address
332 * @sas_address: sas address
333 * @boot_device: boot device object from bios page 2
335 * Return: 1 when there's a match, 0 means no match.
338 _scsih_srch_boot_sas_address(u64 sas_address,
339 Mpi2BootDeviceSasWwid_t *boot_device)
341 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
345 * _scsih_srch_boot_device_name - search based on device name
346 * @device_name: device name specified in INDENTIFY fram
347 * @boot_device: boot device object from bios page 2
349 * Return: 1 when there's a match, 0 means no match.
352 _scsih_srch_boot_device_name(u64 device_name,
353 Mpi2BootDeviceDeviceName_t *boot_device)
355 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
359 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
360 * @enclosure_logical_id: enclosure logical id
361 * @slot_number: slot number
362 * @boot_device: boot device object from bios page 2
364 * Return: 1 when there's a match, 0 means no match.
367 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
368 Mpi2BootDeviceEnclosureSlot_t *boot_device)
370 return (enclosure_logical_id == le64_to_cpu(boot_device->
371 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
372 SlotNumber)) ? 1 : 0;
376 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
377 * port number from port list
378 * @ioc: per adapter object
379 * @port_id: port number
380 * @bypass_dirty_port_flag: when set look the matching hba port entry even
381 * if hba port entry is marked as dirty.
383 * Search for hba port entry corresponding to provided port number,
384 * if available return port object otherwise return NULL.
387 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
388 u8 port_id, u8 bypass_dirty_port_flag)
390 struct hba_port *port, *port_next;
393 * When multipath_on_hba is disabled then
394 * search the hba_port entry using default
397 if (!ioc->multipath_on_hba)
398 port_id = MULTIPATH_DISABLED_PORT_ID;
400 list_for_each_entry_safe(port, port_next,
401 &ioc->port_table_list, list) {
402 if (port->port_id != port_id)
404 if (bypass_dirty_port_flag)
406 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
412 * Allocate hba_port object for default port id (i.e. 255)
413 * when multipath_on_hba is disabled for the HBA.
414 * And add this object to port_table_list.
416 if (!ioc->multipath_on_hba) {
417 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
421 port->port_id = port_id;
423 "hba_port entry: %p, port: %d is added to hba_port list\n",
424 port, port->port_id);
425 list_add_tail(&port->list,
426 &ioc->port_table_list);
433 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
434 * @ioc: per adapter object
435 * @port: hba_port object
438 * Return virtual_phy object corresponding to phy number.
441 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
442 struct hba_port *port, u32 phy)
444 struct virtual_phy *vphy, *vphy_next;
446 if (!port->vphys_mask)
449 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
450 if (vphy->phy_mask & (1 << phy))
457 * _scsih_is_boot_device - search for matching boot device.
458 * @sas_address: sas address
459 * @device_name: device name specified in INDENTIFY fram
460 * @enclosure_logical_id: enclosure logical id
462 * @form: specifies boot device form
463 * @boot_device: boot device object from bios page 2
465 * Return: 1 when there's a match, 0 means no match.
468 _scsih_is_boot_device(u64 sas_address, u64 device_name,
469 u64 enclosure_logical_id, u16 slot, u8 form,
470 Mpi2BiosPage2BootDevice_t *boot_device)
475 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
478 rc = _scsih_srch_boot_sas_address(
479 sas_address, &boot_device->SasWwid);
481 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
482 if (!enclosure_logical_id)
484 rc = _scsih_srch_boot_encl_slot(
485 enclosure_logical_id,
486 slot, &boot_device->EnclosureSlot);
488 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
491 rc = _scsih_srch_boot_device_name(
492 device_name, &boot_device->DeviceName);
494 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
502 * _scsih_get_sas_address - set the sas_address for given device handle
504 * @handle: device handle
505 * @sas_address: sas address
507 * Return: 0 success, non-zero when failure
510 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
513 Mpi2SasDevicePage0_t sas_device_pg0;
514 Mpi2ConfigReply_t mpi_reply;
519 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
520 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
521 ioc_err(ioc, "failure at %s:%d/%s()!\n",
522 __FILE__, __LINE__, __func__);
526 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
527 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
528 /* For HBA, vSES doesn't return HBA SAS address. Instead return
529 * vSES's sas address.
531 if ((handle <= ioc->sas_hba.num_phys) &&
532 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
533 MPI2_SAS_DEVICE_INFO_SEP)))
534 *sas_address = ioc->sas_hba.sas_address;
536 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
540 /* we hit this because the given parent handle doesn't exist */
541 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
544 /* else error case */
545 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
546 handle, ioc_status, __FILE__, __LINE__, __func__);
551 * _scsih_determine_boot_device - determine boot device.
552 * @ioc: per adapter object
553 * @device: sas_device or pcie_device object
554 * @channel: SAS or PCIe channel
556 * Determines whether this device should be first reported device to
557 * to scsi-ml or sas transport, this purpose is for persistent boot device.
558 * There are primary, alternate, and current entries in bios page 2. The order
559 * priority is primary, alternate, then current. This routine saves
560 * the corresponding device object.
561 * The saved data to be used later in _scsih_probe_boot_devices().
564 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
567 struct _sas_device *sas_device;
568 struct _pcie_device *pcie_device;
569 struct _raid_device *raid_device;
572 u64 enclosure_logical_id;
575 /* only process this function when driver loads */
576 if (!ioc->is_driver_loading)
579 /* no Bios, return immediately */
580 if (!ioc->bios_pg3.BiosVersion)
583 if (channel == RAID_CHANNEL) {
584 raid_device = device;
585 sas_address = raid_device->wwid;
587 enclosure_logical_id = 0;
589 } else if (channel == PCIE_CHANNEL) {
590 pcie_device = device;
591 sas_address = pcie_device->wwid;
593 enclosure_logical_id = 0;
597 sas_address = sas_device->sas_address;
598 device_name = sas_device->device_name;
599 enclosure_logical_id = sas_device->enclosure_logical_id;
600 slot = sas_device->slot;
603 if (!ioc->req_boot_device.device) {
604 if (_scsih_is_boot_device(sas_address, device_name,
605 enclosure_logical_id, slot,
606 (ioc->bios_pg2.ReqBootDeviceForm &
607 MPI2_BIOSPAGE2_FORM_MASK),
608 &ioc->bios_pg2.RequestedBootDevice)) {
610 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
611 __func__, (u64)sas_address));
612 ioc->req_boot_device.device = device;
613 ioc->req_boot_device.channel = channel;
617 if (!ioc->req_alt_boot_device.device) {
618 if (_scsih_is_boot_device(sas_address, device_name,
619 enclosure_logical_id, slot,
620 (ioc->bios_pg2.ReqAltBootDeviceForm &
621 MPI2_BIOSPAGE2_FORM_MASK),
622 &ioc->bios_pg2.RequestedAltBootDevice)) {
624 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
625 __func__, (u64)sas_address));
626 ioc->req_alt_boot_device.device = device;
627 ioc->req_alt_boot_device.channel = channel;
631 if (!ioc->current_boot_device.device) {
632 if (_scsih_is_boot_device(sas_address, device_name,
633 enclosure_logical_id, slot,
634 (ioc->bios_pg2.CurrentBootDeviceForm &
635 MPI2_BIOSPAGE2_FORM_MASK),
636 &ioc->bios_pg2.CurrentBootDevice)) {
638 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
639 __func__, (u64)sas_address));
640 ioc->current_boot_device.device = device;
641 ioc->current_boot_device.channel = channel;
646 static struct _sas_device *
647 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
648 struct MPT3SAS_TARGET *tgt_priv)
650 struct _sas_device *ret;
652 assert_spin_locked(&ioc->sas_device_lock);
654 ret = tgt_priv->sas_dev;
661 static struct _sas_device *
662 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
663 struct MPT3SAS_TARGET *tgt_priv)
665 struct _sas_device *ret;
668 spin_lock_irqsave(&ioc->sas_device_lock, flags);
669 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
670 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
675 static struct _pcie_device *
676 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
677 struct MPT3SAS_TARGET *tgt_priv)
679 struct _pcie_device *ret;
681 assert_spin_locked(&ioc->pcie_device_lock);
683 ret = tgt_priv->pcie_dev;
685 pcie_device_get(ret);
691 * mpt3sas_get_pdev_from_target - pcie device search
692 * @ioc: per adapter object
693 * @tgt_priv: starget private object
695 * Context: This function will acquire ioc->pcie_device_lock and will release
696 * before returning the pcie_device object.
698 * This searches for pcie_device from target, then return pcie_device object.
700 static struct _pcie_device *
701 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
702 struct MPT3SAS_TARGET *tgt_priv)
704 struct _pcie_device *ret;
707 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
708 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
709 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
716 * __mpt3sas_get_sdev_by_rphy - sas device search
717 * @ioc: per adapter object
718 * @rphy: sas_rphy pointer
720 * Context: This function will acquire ioc->sas_device_lock and will release
721 * before returning the sas_device object.
723 * This searches for sas_device from rphy object
724 * then return sas_device object.
727 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
728 struct sas_rphy *rphy)
730 struct _sas_device *sas_device;
732 assert_spin_locked(&ioc->sas_device_lock);
734 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
735 if (sas_device->rphy != rphy)
737 sas_device_get(sas_device);
742 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
743 if (sas_device->rphy != rphy)
745 sas_device_get(sas_device);
753 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
754 * sas address from sas_device_list list
755 * @ioc: per adapter object
756 * @sas_address: device sas address
759 * Search for _sas_device object corresponding to provided sas address,
760 * if available return _sas_device object address otherwise return NULL.
763 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
764 u64 sas_address, struct hba_port *port)
766 struct _sas_device *sas_device;
771 assert_spin_locked(&ioc->sas_device_lock);
773 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
774 if (sas_device->sas_address != sas_address)
776 if (sas_device->port != port)
778 sas_device_get(sas_device);
782 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
783 if (sas_device->sas_address != sas_address)
785 if (sas_device->port != port)
787 sas_device_get(sas_device);
795 * mpt3sas_get_sdev_by_addr - sas device search
796 * @ioc: per adapter object
797 * @sas_address: sas address
798 * @port: hba port entry
799 * Context: Calling function should acquire ioc->sas_device_lock
801 * This searches for sas_device based on sas_address & port number,
802 * then return sas_device object.
805 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
806 u64 sas_address, struct hba_port *port)
808 struct _sas_device *sas_device;
811 spin_lock_irqsave(&ioc->sas_device_lock, flags);
812 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
814 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
819 static struct _sas_device *
820 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
822 struct _sas_device *sas_device;
824 assert_spin_locked(&ioc->sas_device_lock);
826 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
827 if (sas_device->handle == handle)
830 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
831 if (sas_device->handle == handle)
837 sas_device_get(sas_device);
842 * mpt3sas_get_sdev_by_handle - sas device search
843 * @ioc: per adapter object
844 * @handle: sas device handle (assigned by firmware)
845 * Context: Calling function should acquire ioc->sas_device_lock
847 * This searches for sas_device based on sas_address, then return sas_device
851 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
853 struct _sas_device *sas_device;
856 spin_lock_irqsave(&ioc->sas_device_lock, flags);
857 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
858 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
864 * _scsih_display_enclosure_chassis_info - display device location info
865 * @ioc: per adapter object
866 * @sas_device: per sas device object
867 * @sdev: scsi device struct
868 * @starget: scsi target struct
871 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
872 struct _sas_device *sas_device, struct scsi_device *sdev,
873 struct scsi_target *starget)
876 if (sas_device->enclosure_handle != 0)
877 sdev_printk(KERN_INFO, sdev,
878 "enclosure logical id (0x%016llx), slot(%d) \n",
880 sas_device->enclosure_logical_id,
882 if (sas_device->connector_name[0] != '\0')
883 sdev_printk(KERN_INFO, sdev,
884 "enclosure level(0x%04x), connector name( %s)\n",
885 sas_device->enclosure_level,
886 sas_device->connector_name);
887 if (sas_device->is_chassis_slot_valid)
888 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
889 sas_device->chassis_slot);
890 } else if (starget) {
891 if (sas_device->enclosure_handle != 0)
892 starget_printk(KERN_INFO, starget,
893 "enclosure logical id(0x%016llx), slot(%d) \n",
895 sas_device->enclosure_logical_id,
897 if (sas_device->connector_name[0] != '\0')
898 starget_printk(KERN_INFO, starget,
899 "enclosure level(0x%04x), connector name( %s)\n",
900 sas_device->enclosure_level,
901 sas_device->connector_name);
902 if (sas_device->is_chassis_slot_valid)
903 starget_printk(KERN_INFO, starget,
904 "chassis slot(0x%04x)\n",
905 sas_device->chassis_slot);
907 if (sas_device->enclosure_handle != 0)
908 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
909 (u64)sas_device->enclosure_logical_id,
911 if (sas_device->connector_name[0] != '\0')
912 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
913 sas_device->enclosure_level,
914 sas_device->connector_name);
915 if (sas_device->is_chassis_slot_valid)
916 ioc_info(ioc, "chassis slot(0x%04x)\n",
917 sas_device->chassis_slot);
922 * _scsih_sas_device_remove - remove sas_device from list.
923 * @ioc: per adapter object
924 * @sas_device: the sas_device object
925 * Context: This function will acquire ioc->sas_device_lock.
927 * If sas_device is on the list, remove it and decrement its reference count.
930 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
931 struct _sas_device *sas_device)
937 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
938 sas_device->handle, (u64)sas_device->sas_address);
940 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
943 * The lock serializes access to the list, but we still need to verify
944 * that nobody removed the entry while we were waiting on the lock.
946 spin_lock_irqsave(&ioc->sas_device_lock, flags);
947 if (!list_empty(&sas_device->list)) {
948 list_del_init(&sas_device->list);
949 sas_device_put(sas_device);
951 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
955 * _scsih_device_remove_by_handle - removing device object by handle
956 * @ioc: per adapter object
957 * @handle: device handle
960 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
962 struct _sas_device *sas_device;
965 if (ioc->shost_recovery)
968 spin_lock_irqsave(&ioc->sas_device_lock, flags);
969 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
971 list_del_init(&sas_device->list);
972 sas_device_put(sas_device);
974 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
976 _scsih_remove_device(ioc, sas_device);
977 sas_device_put(sas_device);
982 * mpt3sas_device_remove_by_sas_address - removing device object by
983 * sas address & port number
984 * @ioc: per adapter object
985 * @sas_address: device sas_address
986 * @port: hba port entry
991 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
992 u64 sas_address, struct hba_port *port)
994 struct _sas_device *sas_device;
997 if (ioc->shost_recovery)
1000 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1001 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1003 list_del_init(&sas_device->list);
1004 sas_device_put(sas_device);
1006 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1008 _scsih_remove_device(ioc, sas_device);
1009 sas_device_put(sas_device);
1014 * _scsih_sas_device_add - insert sas_device to the list.
1015 * @ioc: per adapter object
1016 * @sas_device: the sas_device object
1017 * Context: This function will acquire ioc->sas_device_lock.
1019 * Adding new object to the ioc->sas_device_list.
1022 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1023 struct _sas_device *sas_device)
1025 unsigned long flags;
1028 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1029 __func__, sas_device->handle,
1030 (u64)sas_device->sas_address));
1032 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1035 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1036 sas_device_get(sas_device);
1037 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1038 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1040 if (ioc->hide_drives) {
1041 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1045 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1046 sas_device->sas_address_parent, sas_device->port)) {
1047 _scsih_sas_device_remove(ioc, sas_device);
1048 } else if (!sas_device->starget) {
1050 * When asyn scanning is enabled, its not possible to remove
1051 * devices while scanning is turned on due to an oops in
1052 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1054 if (!ioc->is_driver_loading) {
1055 mpt3sas_transport_port_remove(ioc,
1056 sas_device->sas_address,
1057 sas_device->sas_address_parent,
1059 _scsih_sas_device_remove(ioc, sas_device);
1062 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1066 * _scsih_sas_device_init_add - insert sas_device to the list.
1067 * @ioc: per adapter object
1068 * @sas_device: the sas_device object
1069 * Context: This function will acquire ioc->sas_device_lock.
1071 * Adding new object at driver load time to the ioc->sas_device_init_list.
1074 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1075 struct _sas_device *sas_device)
1077 unsigned long flags;
1080 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1081 __func__, sas_device->handle,
1082 (u64)sas_device->sas_address));
1084 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1087 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1088 sas_device_get(sas_device);
1089 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1090 _scsih_determine_boot_device(ioc, sas_device, 0);
1091 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1095 static struct _pcie_device *
1096 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1098 struct _pcie_device *pcie_device;
1100 assert_spin_locked(&ioc->pcie_device_lock);
1102 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1103 if (pcie_device->wwid == wwid)
1106 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1107 if (pcie_device->wwid == wwid)
1113 pcie_device_get(pcie_device);
1119 * mpt3sas_get_pdev_by_wwid - pcie device search
/*
 * NOTE(review): this chunk is a numbered source listing with physical lines
 * elided (braces, returns, blanks are missing from the excerpt). Code text is
 * left byte-identical below; only comments were added.
 */
1120 * @ioc: per adapter object
1123 * Context: This function will acquire ioc->pcie_device_lock and will release
1124 * before returning the pcie_device object.
1126 * This searches for pcie_device based on wwid, then return pcie_device object.
/* Locked wrapper: takes pcie_device_lock around the __ lookup helper. */
1128 static struct _pcie_device *
1129 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1131 struct _pcie_device *pcie_device;
1132 unsigned long flags;
1134 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1135 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1136 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/*
 * Unlocked lookup by target id + channel: scans both the live list and the
 * boot-time init list.  Caller must already hold ioc->pcie_device_lock
 * (checked by assert_spin_locked).  On a match a reference is taken via
 * pcie_device_get() before the device is handed back to the caller.
 */
1142 static struct _pcie_device *
1143 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1146 struct _pcie_device *pcie_device;
1148 assert_spin_locked(&ioc->pcie_device_lock);
1150 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1151 if (pcie_device->id == id && pcie_device->channel == channel)
1154 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1155 if (pcie_device->id == id && pcie_device->channel == channel)
1161 pcie_device_get(pcie_device);
/* Same pattern as above, keyed on the firmware device handle. */
1165 static struct _pcie_device *
1166 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1168 struct _pcie_device *pcie_device;
1170 assert_spin_locked(&ioc->pcie_device_lock);
1172 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1173 if (pcie_device->handle == handle)
1176 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1177 if (pcie_device->handle == handle)
1183 pcie_device_get(pcie_device);
1189 * mpt3sas_get_pdev_by_handle - pcie device search
1190 * @ioc: per adapter object
1191 * @handle: Firmware device handle
1193 * Context: This function will acquire ioc->pcie_device_lock and will release
1194 * before returning the pcie_device object.
1196 * This searches for pcie_device based on handle, then return pcie_device
/* Locked wrapper around __mpt3sas_get_pdev_by_handle(); returned device
 * carries a reference the caller must drop with pcie_device_put(). */
1199 struct _pcie_device *
1200 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1202 struct _pcie_device *pcie_device;
1203 unsigned long flags;
1205 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1206 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1207 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1213 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1214 * @ioc: per adapter object
1215 * Context: This function will acquire ioc->pcie_device_lock
1217 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1218 * which has reported maximum among all available NVMe drives.
1219 * Minimum max_shutdown_latency will be six seconds.
/* Walks pcie_device_list under the lock keeping the largest per-device
 * shutdown_latency; IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT acts as the floor. */
1222 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1224 struct _pcie_device *pcie_device;
1225 unsigned long flags;
1226 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1228 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1229 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1230 if (pcie_device->shutdown_latency) {
1231 if (shutdown_latency < pcie_device->shutdown_latency)
1233 pcie_device->shutdown_latency;
1236 ioc->max_shutdown_latency = shutdown_latency;
1237 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1241 * _scsih_pcie_device_remove - remove pcie_device from list.
1242 * @ioc: per adapter object
1243 * @pcie_device: the pcie_device object
1244 * Context: This function will acquire ioc->pcie_device_lock.
1246 * If pcie_device is on the list, remove it and decrement its reference count.
1249 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1250 struct _pcie_device *pcie_device)
1252 unsigned long flags;
1253 int was_on_pcie_device_list = 0;
1254 u8 update_latency = 0;
1258 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1259 pcie_device->handle, (u64)pcie_device->wwid);
1260 if (pcie_device->enclosure_handle != 0)
1261 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1262 (u64)pcie_device->enclosure_logical_id,
1264 if (pcie_device->connector_name[0] != '\0')
1265 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1266 pcie_device->enclosure_level,
1267 pcie_device->connector_name);
/* Unlink under the lock; list_del_init() keeps the node self-consistent so a
 * second remove is a harmless no-op (list_empty() check above). */
1269 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1270 if (!list_empty(&pcie_device->list)) {
1271 list_del_init(&pcie_device->list);
1272 was_on_pcie_device_list = 1;
1274 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1276 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* Drop the list's reference only if we actually unlinked the device. */
1277 if (was_on_pcie_device_list) {
1278 kfree(pcie_device->serial_number);
1279 pcie_device_put(pcie_device);
1283 * This device's RTD3 Entry Latency matches IOC's
1284 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1285 * from the available drives as current drive is getting removed.
1288 _scsih_set_nvme_max_shutdown_latency(ioc);
1293 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1294 * @ioc: per adapter object
1295 * @handle: device handle
/* Handle-keyed variant: bails out during host recovery, looks the device up
 * under the lock (lookup takes a reference), unlinks it, then detaches it
 * from the SCSI mid-layer outside the lock. */
1298 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1300 struct _pcie_device *pcie_device;
1301 unsigned long flags;
1302 int was_on_pcie_device_list = 0;
1303 u8 update_latency = 0;
1305 if (ioc->shost_recovery)
1308 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1309 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1311 if (!list_empty(&pcie_device->list)) {
1312 list_del_init(&pcie_device->list);
1313 was_on_pcie_device_list = 1;
/* Balances the reference taken by __mpt3sas_get_pdev_by_handle() above. */
1314 pcie_device_put(pcie_device);
1316 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1319 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1320 if (was_on_pcie_device_list) {
1321 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
/* Drops the list's reference. */
1322 pcie_device_put(pcie_device);
1326 * This device's RTD3 Entry Latency matches IOC's
1327 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1328 * from the available drives as current drive is getting removed.
1331 _scsih_set_nvme_max_shutdown_latency(ioc);
1335 * _scsih_pcie_device_add - add pcie_device object
1336 * @ioc: per adapter object
1337 * @pcie_device: pcie_device object
1339 * This is added to the pcie_device_list link list.
/* NOTE(review): the ioc_info() calls below end in "));" which suggests they
 * were wrapped in a debug-print macro elided from this listing — confirm
 * against the full source. */
1342 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1343 struct _pcie_device *pcie_device)
1345 unsigned long flags;
1348 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1350 pcie_device->handle, (u64)pcie_device->wwid));
1351 if (pcie_device->enclosure_handle != 0)
1353 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1355 (u64)pcie_device->enclosure_logical_id,
1356 pcie_device->slot));
1357 if (pcie_device->connector_name[0] != '\0')
1359 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1360 __func__, pcie_device->enclosure_level,
1361 pcie_device->connector_name));
/* The list holds its own reference: get before list_add_tail. */
1363 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1364 pcie_device_get(pcie_device);
1365 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1366 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* A blocked device is tracked but not exposed to the SCSI mid-layer. */
1368 if (pcie_device->access_status ==
1369 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1370 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
/* Register with the mid-layer; on failure undo the list insertion. */
1373 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1374 _scsih_pcie_device_remove(ioc, pcie_device);
1375 } else if (!pcie_device->starget) {
1376 if (!ioc->is_driver_loading) {
1377 /*TODO-- Need to find out whether this condition will occur or not*/
1378 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1381 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1385 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1386 * @ioc: per adapter object
1387 * @pcie_device: the pcie_device object
1388 * Context: This function will acquire ioc->pcie_device_lock.
1390 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1393 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1394 struct _pcie_device *pcie_device)
1396 unsigned long flags;
1399 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1401 pcie_device->handle, (u64)pcie_device->wwid));
1402 if (pcie_device->enclosure_handle != 0)
1404 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1406 (u64)pcie_device->enclosure_logical_id,
1407 pcie_device->slot));
1408 if (pcie_device->connector_name[0] != '\0')
1410 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1411 __func__, pcie_device->enclosure_level,
1412 pcie_device->connector_name));
/* Init-list insertion at load time; non-blocked devices are also considered
 * as boot device candidates while still under the lock. */
1414 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1415 pcie_device_get(pcie_device);
1416 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1417 if (pcie_device->access_status !=
1418 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1419 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1420 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1423 * _scsih_raid_device_find_by_id - raid device search
1424 * @ioc: per adapter object
1425 * @id: sas device target id
1426 * @channel: sas device channel
1427 * Context: Calling function should acquire ioc->raid_device_lock
1429 * This searches for raid_device based on target id, then return raid_device
/* Unlike the pcie lookups, the raid find helpers take no reference — entries
 * are protected solely by raid_device_lock held by the caller. */
1432 static struct _raid_device *
1433 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1435 struct _raid_device *raid_device, *r;
1438 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1439 if (raid_device->id == id && raid_device->channel == channel) {
1450 * mpt3sas_raid_device_find_by_handle - raid device search
1451 * @ioc: per adapter object
1452 * @handle: sas device handle (assigned by firmware)
1453 * Context: Calling function should acquire ioc->raid_device_lock
1455 * This searches for raid_device based on handle, then return raid_device
1458 struct _raid_device *
1459 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1461 struct _raid_device *raid_device, *r;
1464 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1465 if (raid_device->handle != handle)
1476 * _scsih_raid_device_find_by_wwid - raid device search
1477 * @ioc: per adapter object
1479 * Context: Calling function should acquire ioc->raid_device_lock
1481 * This searches for raid_device based on wwid, then return raid_device
1484 static struct _raid_device *
1485 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1487 struct _raid_device *raid_device, *r;
1490 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1491 if (raid_device->wwid != wwid)
1502 * _scsih_raid_device_add - add raid_device object
1503 * @ioc: per adapter object
1504 * @raid_device: raid_device object
1506 * This is added to the raid_device_list link list.
1509 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1510 struct _raid_device *raid_device)
1512 unsigned long flags;
1515 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1517 raid_device->handle, (u64)raid_device->wwid));
/* Plain locked list insertion — no refcounting on raid_device objects. */
1519 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1520 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1521 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1525 * _scsih_raid_device_remove - delete raid_device object
1526 * @ioc: per adapter object
1527 * @raid_device: raid_device object
1531 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1532 struct _raid_device *raid_device)
1534 unsigned long flags;
1536 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1537 list_del(&raid_device->list);
1539 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1543 * mpt3sas_scsih_expander_find_by_handle - expander device search
1544 * @ioc: per adapter object
1545 * @handle: expander handle (assigned by firmware)
1546 * Context: Calling function should acquire ioc->sas_device_lock
1548 * This searches for expander device based on handle, then returns the
/* Linear scan of sas_expander_list; caller holds the lock per the kdoc. */
1552 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1554 struct _sas_node *sas_expander, *r;
1557 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1558 if (sas_expander->handle != handle)
1568 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1569 * @ioc: per adapter object
1570 * @handle: enclosure handle (assigned by firmware)
1571 * Context: Calling function should acquire ioc->sas_device_lock
1573 * This searches for enclosure device based on handle, then returns the
/* Handle lives in the firmware page (pg0), hence the le16_to_cpu(). */
1576 static struct _enclosure_node *
1577 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1579 struct _enclosure_node *enclosure_dev, *r;
1582 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1583 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1592 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1593 * @ioc: per adapter object
1594 * @sas_address: sas address
1595 * @port: hba port entry
1596 * Context: Calling function should acquire ioc->sas_node_lock.
1598 * This searches for expander device based on sas_address & port number,
1599 * then returns the sas_node object.
/* Match requires BOTH the SAS address and the hba_port to agree. */
1602 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1603 u64 sas_address, struct hba_port *port)
1605 struct _sas_node *sas_expander, *r = NULL;
1610 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1611 if (sas_expander->sas_address != sas_address)
1613 if (sas_expander->port != port)
1623 * _scsih_expander_node_add - insert expander device to the list.
1624 * @ioc: per adapter object
1625 * @sas_expander: the sas_device object
1626 * Context: This function will acquire ioc->sas_node_lock.
1628 * Adding new object to the ioc->sas_expander_list.
1631 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1632 struct _sas_node *sas_expander)
1634 unsigned long flags;
1636 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1637 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1638 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1642 * _scsih_is_end_device - determines if device is an end device
1643 * @device_info: bitfield providing information about the device.
1646 * Return: 1 if end device.
/* An end device must also expose at least one of SSP/STP/SATA target bits;
 * bitwise | is used deliberately to OR the already-masked flag values. */
1649 _scsih_is_end_device(u32 device_info)
1651 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1652 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1654 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1661 * _scsih_is_nvme_pciescsi_device - determines if
1662 * device is an pcie nvme/scsi device
1663 * @device_info: bitfield providing information about the device.
1666 * Returns 1 if device is pcie device type nvme/scsi.
/* Masks out the PCIe device-type field and accepts NVMe or SCSI types. */
1669 _scsih_is_nvme_pciescsi_device(u32 device_info)
1671 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1672 == MPI26_PCIE_DEVINFO_NVME) ||
1673 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1674 == MPI26_PCIE_DEVINFO_SCSI))
1681 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1682 * @ioc: per adapter object
1685 * Context: This function will acquire ioc->scsi_lookup_lock.
1687 * This will search for a matching channel:id in the scsi_lookup array,
1688 * returning 1 if found.
/* Scans every possible smid and checks the associated outstanding command,
 * if any, for a channel:id match. */
1691 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1695 struct scsi_cmnd *scmd;
1698 smid <= ioc->shost->can_queue; smid++) {
1699 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1702 if (scmd->device->id == id &&
1703 scmd->device->channel == channel)
1710 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1711 * @ioc: per adapter object
1715 * Context: This function will acquire ioc->scsi_lookup_lock.
1717 * This will search for a matching channel:id:lun in the scsi_lookup array,
1718 * returning 1 if found.
/* Same scan as above but the LUN must match as well. */
1721 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1722 unsigned int lun, int channel)
1725 struct scsi_cmnd *scmd;
1727 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1729 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1732 if (scmd->device->id == id &&
1733 scmd->device->channel == channel &&
1734 scmd->device->lun == lun)
1741 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1742 * @ioc: per adapter object
1743 * @smid: system request message index
1745 * Return: the smid stored scmd pointer.
1746 * Then will dereference the stored scmd pointer.
/* Maps a driver smid back to the block-layer tag (queue number folded into
 * the unique tag), then validates via the message frame's DevHandle and the
 * per-command tracker before returning the scmd. */
1749 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1751 struct scsi_cmnd *scmd = NULL;
1752 struct scsiio_tracker *st;
1753 Mpi25SCSIIORequest_t *mpi_request;
1757 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1759 ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1761 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1764 * If SCSI IO request is outstanding at driver level then
1765 * DevHandle filed must be non-zero. If DevHandle is zero
1766 * then it means that this smid is free at driver level,
1769 if (!mpi_request->DevHandle)
1772 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1774 st = scsi_cmd_priv(scmd);
/* cb_idx==0xFF or smid==0 marks a stale/unclaimed tracker — not ours. */
1775 if (st->cb_idx == 0xFF || st->smid == 0)
1783 * scsih_change_queue_depth - setting device queue depth
1784 * @sdev: scsi device struct
1785 * @qdepth: requested queue depth
1787 * Return: queue depth.
/* Clamps the requested depth: host can_queue is the ceiling, SATA end
 * devices get MPT3SAS_SATA_QUEUE_DEPTH, untagged devices get depth 1
 * (per the tagged_supported check below). */
1790 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1792 struct Scsi_Host *shost = sdev->host;
1794 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1795 struct MPT3SAS_DEVICE *sas_device_priv_data;
1796 struct MPT3SAS_TARGET *sas_target_priv_data;
1797 struct _sas_device *sas_device;
1798 unsigned long flags;
1800 max_depth = shost->can_queue;
1803 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1806 if (ioc->enable_sdev_max_qd)
/* Walk hostdata -> target data; bail out early when either is missing or
 * the target is a RAID volume (volumes skip the SATA clamp). */
1809 sas_device_priv_data = sdev->hostdata;
1810 if (!sas_device_priv_data)
1812 sas_target_priv_data = sas_device_priv_data->sas_target;
1813 if (!sas_target_priv_data)
1815 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1818 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1819 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1821 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1822 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
/* Drops the reference taken by __mpt3sas_get_sdev_from_target(). */
1824 sas_device_put(sas_device);
1826 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1830 if (!sdev->tagged_supported)
1832 if (qdepth > max_depth)
1834 scsi_change_queue_depth(sdev, qdepth);
1835 sdev_printk(KERN_INFO, sdev,
1836 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1837 sdev->queue_depth, sdev->tagged_supported,
1838 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1839 return sdev->queue_depth;
1843 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1844 * @sdev: scsi device struct
1845 * @qdepth: requested queue depth
/* Public wrapper: with enable_sdev_max_qd the request is overridden to the
 * host maximum before delegating to scsih_change_queue_depth(). */
1850 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1852 struct Scsi_Host *shost = sdev->host;
1853 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1855 if (ioc->enable_sdev_max_qd)
1856 qdepth = shost->can_queue;
1858 scsih_change_queue_depth(sdev, qdepth);
1862 * scsih_target_alloc - target add routine
1863 * @starget: scsi target struct
1865 * Return: 0 if ok. Any other return is assumed to be an error and
1866 * the device is ignored.
/* Allocates per-target private data and wires it to the matching internal
 * device object, dispatching on channel: RAID volume, PCIe (NVMe), or
 * SAS/SATA (looked up via the target's rphy). */
1869 scsih_target_alloc(struct scsi_target *starget)
1871 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1872 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1873 struct MPT3SAS_TARGET *sas_target_priv_data;
1874 struct _sas_device *sas_device;
1875 struct _raid_device *raid_device;
1876 struct _pcie_device *pcie_device;
1877 unsigned long flags;
1878 struct sas_rphy *rphy;
1880 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1882 if (!sas_target_priv_data)
1885 starget->hostdata = sas_target_priv_data;
1886 sas_target_priv_data->starget = starget;
/* Handle stays invalid until one of the lookups below succeeds. */
1887 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1890 if (starget->channel == RAID_CHANNEL) {
1891 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1892 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1895 sas_target_priv_data->handle = raid_device->handle;
1896 sas_target_priv_data->sas_address = raid_device->wwid;
1897 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1898 if (ioc->is_warpdrive)
1899 sas_target_priv_data->raid_device = raid_device;
1900 raid_device->starget = starget;
1902 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* PCIe (NVMe) targets: the __ lookup takes a reference that is held in
 * sas_target_priv_data->pcie_dev until scsih_target_destroy(). */
1907 if (starget->channel == PCIE_CHANNEL) {
1908 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1909 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1912 sas_target_priv_data->handle = pcie_device->handle;
1913 sas_target_priv_data->sas_address = pcie_device->wwid;
1914 sas_target_priv_data->port = NULL;
1915 sas_target_priv_data->pcie_dev = pcie_device;
1916 pcie_device->starget = starget;
1917 pcie_device->id = starget->id;
1918 pcie_device->channel = starget->channel;
1919 sas_target_priv_data->flags |=
1920 MPT_TARGET_FLAGS_PCIE_DEVICE;
1921 if (pcie_device->fast_path)
1922 sas_target_priv_data->flags |=
1923 MPT_TARGET_FASTPATH_IO;
1925 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1929 /* sas/sata devices */
1930 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1931 rphy = dev_to_rphy(starget->dev.parent);
1932 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1935 sas_target_priv_data->handle = sas_device->handle;
1936 sas_target_priv_data->sas_address = sas_device->sas_address;
1937 sas_target_priv_data->port = sas_device->port;
1938 sas_target_priv_data->sas_dev = sas_device;
1939 sas_device->starget = starget;
1940 sas_device->id = starget->id;
1941 sas_device->channel = starget->channel;
/* pd_handles bit set => device is a hidden RAID component. */
1942 if (test_bit(sas_device->handle, ioc->pd_handles))
1943 sas_target_priv_data->flags |=
1944 MPT_TARGET_FLAGS_RAID_COMPONENT;
1945 if (sas_device->fast_path)
1946 sas_target_priv_data->flags |=
1947 MPT_TARGET_FASTPATH_IO;
1949 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1955 * scsih_target_destroy - target destroy routine
1956 * @starget: scsi target struct
/* Mirror of scsih_target_alloc(): detaches the internal device object from
 * the target and releases references before freeing the private data. */
1959 scsih_target_destroy(struct scsi_target *starget)
1961 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1962 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1963 struct MPT3SAS_TARGET *sas_target_priv_data;
1964 struct _sas_device *sas_device;
1965 struct _raid_device *raid_device;
1966 struct _pcie_device *pcie_device;
1967 unsigned long flags;
1969 sas_target_priv_data = starget->hostdata;
1970 if (!sas_target_priv_data)
1973 if (starget->channel == RAID_CHANNEL) {
1974 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1975 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1978 raid_device->starget = NULL;
1979 raid_device->sdev = NULL;
1981 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1985 if (starget->channel == PCIE_CHANNEL) {
1986 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1987 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1988 sas_target_priv_data);
1989 if (pcie_device && (pcie_device->starget == starget) &&
1990 (pcie_device->id == starget->id) &&
1991 (pcie_device->channel == starget->channel))
1992 pcie_device->starget = NULL;
1996 * Corresponding get() is in _scsih_target_alloc()
/* Two puts: one for the lookup above, one for the reference held by the
 * target private data since target_alloc. */
1998 sas_target_priv_data->pcie_dev = NULL;
1999 pcie_device_put(pcie_device);
2000 pcie_device_put(pcie_device);
2002 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2006 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2007 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2008 if (sas_device && (sas_device->starget == starget) &&
2009 (sas_device->id == starget->id) &&
2010 (sas_device->channel == starget->channel))
2011 sas_device->starget = NULL;
2015 * Corresponding get() is in _scsih_target_alloc()
2017 sas_target_priv_data->sas_dev = NULL;
2018 sas_device_put(sas_device);
2020 sas_device_put(sas_device);
2022 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2025 kfree(sas_target_priv_data);
2026 starget->hostdata = NULL;
2030 * scsih_slave_alloc - device add routine
2031 * @sdev: scsi device struct
2033 * Return: 0 if ok. Any other return is assumed to be an error and
2034 * the device is ignored.
/* Allocates per-LUN private data, bumps the target's LUN count, and binds
 * the new sdev/starget to the matching internal device object. */
2037 scsih_slave_alloc(struct scsi_device *sdev)
2039 struct Scsi_Host *shost;
2040 struct MPT3SAS_ADAPTER *ioc;
2041 struct MPT3SAS_TARGET *sas_target_priv_data;
2042 struct MPT3SAS_DEVICE *sas_device_priv_data;
2043 struct scsi_target *starget;
2044 struct _raid_device *raid_device;
2045 struct _sas_device *sas_device;
2046 struct _pcie_device *pcie_device;
2047 unsigned long flags;
2049 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2051 if (!sas_device_priv_data)
2054 sas_device_priv_data->lun = sdev->lun;
2055 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2057 starget = scsi_target(sdev);
2058 sas_target_priv_data = starget->hostdata;
2059 sas_target_priv_data->num_luns++;
2060 sas_device_priv_data->sas_target = sas_target_priv_data;
2061 sdev->hostdata = sas_device_priv_data;
/* Hidden RAID components must not be claimed by upper-level drivers. */
2062 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2063 sdev->no_uld_attach = 1;
2065 shost = dev_to_shost(&starget->dev);
2066 ioc = shost_priv(shost);
2067 if (starget->channel == RAID_CHANNEL) {
2068 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2069 raid_device = _scsih_raid_device_find_by_id(ioc,
2070 starget->id, starget->channel);
2072 raid_device->sdev = sdev; /* raid is single lun */
2073 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2075 if (starget->channel == PCIE_CHANNEL) {
2076 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2077 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2078 sas_target_priv_data->sas_address);
/* Only set starget on first LUN — it may already be populated. */
2079 if (pcie_device && (pcie_device->starget == NULL)) {
2080 sdev_printk(KERN_INFO, sdev,
2081 "%s : pcie_device->starget set to starget @ %d\n",
2082 __func__, __LINE__);
2083 pcie_device->starget = starget;
/* Balances the reference taken by __mpt3sas_get_pdev_by_wwid(). */
2087 pcie_device_put(pcie_device);
2088 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2090 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2091 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2092 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2093 sas_target_priv_data->sas_address,
2094 sas_target_priv_data->port);
2095 if (sas_device && (sas_device->starget == NULL)) {
2096 sdev_printk(KERN_INFO, sdev,
2097 "%s : sas_device->starget set to starget @ %d\n",
2098 __func__, __LINE__);
2099 sas_device->starget = starget;
2103 sas_device_put(sas_device);
2105 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2112 * scsih_slave_destroy - device destroy routine
2113 * @sdev: scsi device struct
/* Per-LUN teardown: decrements the target LUN count and, on the last LUN,
 * clears the internal device's starget back-pointer; frees hostdata. */
2116 scsih_slave_destroy(struct scsi_device *sdev)
2118 struct MPT3SAS_TARGET *sas_target_priv_data;
2119 struct scsi_target *starget;
2120 struct Scsi_Host *shost;
2121 struct MPT3SAS_ADAPTER *ioc;
2122 struct _sas_device *sas_device;
2123 struct _pcie_device *pcie_device;
2124 unsigned long flags;
2126 if (!sdev->hostdata)
2129 starget = scsi_target(sdev);
2130 sas_target_priv_data = starget->hostdata;
2131 sas_target_priv_data->num_luns--;
2133 shost = dev_to_shost(&starget->dev);
2134 ioc = shost_priv(shost);
2136 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2137 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2138 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2139 sas_target_priv_data);
2140 if (pcie_device && !sas_target_priv_data->num_luns)
2141 pcie_device->starget = NULL;
/* Balances the lookup's reference. */
2144 pcie_device_put(pcie_device);
2146 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2148 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2149 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2150 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2151 sas_target_priv_data);
2152 if (sas_device && !sas_target_priv_data->num_luns)
2153 sas_device->starget = NULL;
2156 sas_device_put(sas_device);
2157 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2160 kfree(sdev->hostdata);
2161 sdev->hostdata = NULL;
2165 * _scsih_display_sata_capabilities - sata capabilities
2166 * @ioc: per adapter object
2167 * @handle: device handle
2168 * @sdev: scsi device struct
/* Reads SAS Device Page 0 from firmware and prints the SATA feature flags
 * (ATAPI/NCQ/async-notify/SMART/FUA/SW-preserve) for the sdev. */
2171 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2172 u16 handle, struct scsi_device *sdev)
2174 Mpi2ConfigReply_t mpi_reply;
2175 Mpi2SasDevicePage0_t sas_device_pg0;
2180 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2181 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2182 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2183 __FILE__, __LINE__, __func__);
2187 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2188 MPI2_IOCSTATUS_MASK;
2189 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2190 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2191 __FILE__, __LINE__, __func__);
2195 flags = le16_to_cpu(sas_device_pg0.Flags);
2196 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2198 sdev_printk(KERN_INFO, sdev,
2199 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2200 "sw_preserve(%s)\n",
2201 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2203 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2207 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2211 * raid transport support -
2212 * Enabled for SLES11 and newer, in older kernels the driver will panic when
2213 * unloading the driver followed by a load - I believe that the subroutine
2214 * raid_class_release() is not cleaning up properly.
2218 * scsih_is_raid - return boolean indicating device is raid volume
2219 * @dev: the device struct object
/* Warpdrive adapters never report RAID here (early-out on is_warpdrive);
 * otherwise the channel decides. */
2222 scsih_is_raid(struct device *dev)
2224 struct scsi_device *sdev = to_scsi_device(dev);
2225 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2227 if (ioc->is_warpdrive)
2229 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
/* Boolean predicate: device sits on the PCIe (NVMe) channel. */
2233 scsih_is_nvme(struct device *dev)
2235 struct scsi_device *sdev = to_scsi_device(dev);
2237 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2241 * scsih_get_resync - get raid volume resync percent complete
2242 * @dev: the device struct object
/* Looks up the volume by id under raid_device_lock, then (outside the lock)
 * confirms a resync is actually in progress via RAID Volume Page 0 before
 * reporting the cached percent_complete to the raid transport class. */
2245 scsih_get_resync(struct device *dev)
2247 struct scsi_device *sdev = to_scsi_device(dev);
2248 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2249 static struct _raid_device *raid_device;
2250 unsigned long flags;
2251 Mpi2RaidVolPage0_t vol_pg0;
2252 Mpi2ConfigReply_t mpi_reply;
2253 u32 volume_status_flags;
2254 u8 percent_complete;
2257 percent_complete = 0;
2259 if (ioc->is_warpdrive)
2262 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2263 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2266 handle = raid_device->handle;
2267 percent_complete = raid_device->percent_complete;
2269 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2274 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2275 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2276 sizeof(Mpi2RaidVolPage0_t))) {
2277 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2278 __FILE__, __LINE__, __func__);
2279 percent_complete = 0;
2283 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
/* No resync in progress => report 0% regardless of cached value. */
2284 if (!(volume_status_flags &
2285 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2286 percent_complete = 0;
/* Report via the raid template matching this HBA's MPI generation. */
2290 switch (ioc->hba_mpi_version_belonged) {
2292 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2296 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2302 * scsih_get_state - get raid volume level
2303 * @dev: the device struct object
/* Maps firmware volume state (RAID Volume Page 0) onto the generic
 * raid_state enum and publishes it through the raid transport class. */
2306 scsih_get_state(struct device *dev)
2308 struct scsi_device *sdev = to_scsi_device(dev);
2309 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2310 static struct _raid_device *raid_device;
2311 unsigned long flags;
2312 Mpi2RaidVolPage0_t vol_pg0;
2313 Mpi2ConfigReply_t mpi_reply;
2315 enum raid_state state = RAID_STATE_UNKNOWN;
2318 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2319 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2322 handle = raid_device->handle;
2323 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2328 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2329 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2330 sizeof(Mpi2RaidVolPage0_t))) {
2331 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2332 __FILE__, __LINE__, __func__);
2336 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
/* An in-progress resync takes precedence over the steady-state value. */
2337 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2338 state = RAID_STATE_RESYNCING;
2342 switch (vol_pg0.VolumeState) {
2343 case MPI2_RAID_VOL_STATE_OPTIMAL:
2344 case MPI2_RAID_VOL_STATE_ONLINE:
2345 state = RAID_STATE_ACTIVE;
2347 case MPI2_RAID_VOL_STATE_DEGRADED:
2348 state = RAID_STATE_DEGRADED;
2350 case MPI2_RAID_VOL_STATE_FAILED:
2351 case MPI2_RAID_VOL_STATE_MISSING:
2352 state = RAID_STATE_OFFLINE;
2356 switch (ioc->hba_mpi_version_belonged) {
2358 raid_set_state(mpt2sas_raid_template, dev, state);
2362 raid_set_state(mpt3sas_raid_template, dev, state);
2368 * _scsih_set_level - set raid level
2370 * @sdev: scsi device struct
2371 * @volume_type: volume type
/* Translates the MPI volume-type constant to the generic raid_level enum
 * and publishes it through the MPI-generation-specific raid template. */
2374 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2375 struct scsi_device *sdev, u8 volume_type)
2377 enum raid_level level = RAID_LEVEL_UNKNOWN;
2379 switch (volume_type) {
2380 case MPI2_RAID_VOL_TYPE_RAID0:
2381 level = RAID_LEVEL_0;
2383 case MPI2_RAID_VOL_TYPE_RAID10:
2384 level = RAID_LEVEL_10;
2386 case MPI2_RAID_VOL_TYPE_RAID1E:
2387 level = RAID_LEVEL_1E;
2389 case MPI2_RAID_VOL_TYPE_RAID1:
2390 level = RAID_LEVEL_1;
2394 switch (ioc->hba_mpi_version_belonged) {
2396 raid_set_level(mpt2sas_raid_template,
2397 &sdev->sdev_gendev, level);
2401 raid_set_level(mpt3sas_raid_template,
2402 &sdev->sdev_gendev, level);
2409 * _scsih_get_volume_capabilities - volume capabilities
2410 * @ioc: per adapter object
2411 * @raid_device: the raid_device object
2413 * Return: 0 for success, else 1
/* Queries firmware for the volume's physical-disk count, sizes and reads
 * RAID Volume Page 0 (variable-length: header + per-disk entries), then
 * derives device_info from the first member disk. */
2416 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2417 struct _raid_device *raid_device)
2419 Mpi2RaidVolPage0_t *vol_pg0;
2420 Mpi2RaidPhysDiskPage0_t pd_pg0;
2421 Mpi2SasDevicePage0_t sas_device_pg0;
2422 Mpi2ConfigReply_t mpi_reply;
2426 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2427 &num_pds)) || !num_pds) {
2429 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2430 __FILE__, __LINE__, __func__));
2434 raid_device->num_pds = num_pds;
/* Page size = fixed header up to PhysDisk[] plus one entry per disk. */
2435 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2436 sizeof(Mpi2RaidVol0PhysDisk_t));
2437 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2440 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2441 __FILE__, __LINE__, __func__));
2445 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2446 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2448 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2449 __FILE__, __LINE__, __func__));
2454 raid_device->volume_type = vol_pg0->VolumeType;
2456 /* figure out what the underlying devices are by
2457 * obtaining the device_info bits for the 1st device
2459 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2460 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2461 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2462 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2463 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2464 le16_to_cpu(pd_pg0.DevHandle)))) {
2465 raid_device->device_info =
2466 le32_to_cpu(sas_device_pg0.DeviceInfo);
2475 * _scsih_enable_tlr - setting TLR flags
2476 * @ioc: per adapter object
2477 * @sdev: scsi device struct
2479 * Enabling Transaction Layer Retries for tape devices when
2480 * vpd page 0x90 is present
/*
 * Bail out early for non-tape devices and for IOCs that do not
 * advertise the TLR capability; otherwise enable TLR on the sdev and
 * log whether it took effect.
 * NOTE(review): extract is missing lines (the VPD 0x90 check implied
 * by the header comment, returns, braces). Code kept byte-identical.
 */
2484 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2488 if (sdev->type != TYPE_TAPE)
2491 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2494 sas_enable_tlr(sdev);
2495 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2496 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2502 * scsih_slave_configure - device configure routine.
2503 * @sdev: scsi device struct
2505 * Return: 0 if ok. Any other return is assumed to be an error and
2506 * the device is ignored.
/*
 * Per-device setup called by the SCSI midlayer after slave_alloc.
 * Handles three device classes in turn: RAID volumes, NVMe (PCIe)
 * devices, and plain SAS/SATA devices; in each case it picks a queue
 * depth, logs identity/enclosure info, and applies request-queue
 * limits.
 * NOTE(review): extract is missing many lines (dfailprintk() wrappers
 * around the warnings -- hence the stray ')' -- plus returns, braces,
 * and several assignments such as ds/r_level). Code kept
 * byte-identical; read against the full upstream file before editing.
 */
2509 scsih_slave_configure(struct scsi_device *sdev)
2511 struct Scsi_Host *shost = sdev->host;
2512 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2513 struct MPT3SAS_DEVICE *sas_device_priv_data;
2514 struct MPT3SAS_TARGET *sas_target_priv_data;
2515 struct _sas_device *sas_device;
2516 struct _pcie_device *pcie_device;
2517 struct _raid_device *raid_device;
2518 unsigned long flags;
2523 u16 handle, volume_handle = 0;
2524 u64 volume_wwid = 0;
2527 sas_device_priv_data = sdev->hostdata;
2528 sas_device_priv_data->configured_lun = 1;
2529 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2530 sas_target_priv_data = sas_device_priv_data->sas_target;
2531 handle = sas_target_priv_data->handle;
2533 /* raid volume handling */
2534 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
/* look up the volume under the raid_device lock; warn if it vanished */
2536 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2537 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2541 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2542 __FILE__, __LINE__, __func__));
2546 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2548 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2549 __FILE__, __LINE__, __func__));
2554 * WARPDRIVE: Initialize the required data for Direct IO
2556 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2558 /* RAID Queue Depth Support
2559 * IS volume = underlying qdepth of drive type, either
2560 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2561 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2563 if (raid_device->device_info &
2564 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2565 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2568 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2569 if (raid_device->device_info &
2570 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
/* per-volume-type queue depth and (elsewhere) r_level/ds strings */
2576 switch (raid_device->volume_type) {
2577 case MPI2_RAID_VOL_TYPE_RAID0:
2580 case MPI2_RAID_VOL_TYPE_RAID1E:
2581 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
/* OEM flag: an even pd_count RAID1E is displayed as RAID10 */
2582 if (ioc->manu_pg10.OEMIdentifier &&
2583 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2584 MFG10_GF0_R10_DISPLAY) &&
2585 !(raid_device->num_pds % 2))
2590 case MPI2_RAID_VOL_TYPE_RAID1:
2591 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2594 case MPI2_RAID_VOL_TYPE_RAID10:
2595 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2598 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2600 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
/* WarpDrive hides IR messages; skip the identity printout there */
2605 if (!ioc->hide_ir_msg)
2606 sdev_printk(KERN_INFO, sdev,
2607 "%s: handle(0x%04x), wwid(0x%016llx),"
2608 " pd_count(%d), type(%s)\n",
2609 r_level, raid_device->handle,
2610 (unsigned long long)raid_device->wwid,
2611 raid_device->num_pds, ds);
/* clamp the request queue for RAID volumes */
2613 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2614 blk_queue_max_hw_sectors(sdev->request_queue,
2615 MPT3SAS_RAID_MAX_SECTORS);
2616 sdev_printk(KERN_INFO, sdev,
2617 "Set queue's max_sector to: %u\n",
2618 MPT3SAS_RAID_MAX_SECTORS);
2621 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2623 /* raid transport support */
2624 if (!ioc->is_warpdrive)
2625 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2629 /* non-raid handling */
2630 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
/* hidden raid component: record the owning volume's handle/wwid */
2631 if (mpt3sas_config_get_volume_handle(ioc, handle,
2634 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2635 __FILE__, __LINE__, __func__));
2638 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2639 volume_handle, &volume_wwid)) {
2641 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2642 __FILE__, __LINE__, __func__));
/* PCIe (NVMe) device handling */
2648 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2649 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2650 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2651 sas_device_priv_data->sas_target->sas_address);
2653 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2655 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2656 __FILE__, __LINE__, __func__));
2660 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2662 sdev_printk(KERN_INFO, sdev,
2663 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2664 ds, handle, (unsigned long long)pcie_device->wwid,
2665 pcie_device->port_num);
2666 if (pcie_device->enclosure_handle != 0)
2667 sdev_printk(KERN_INFO, sdev,
2668 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2670 (unsigned long long)pcie_device->enclosure_logical_id,
2672 if (pcie_device->connector_name[0] != '\0')
2673 sdev_printk(KERN_INFO, sdev,
2674 "%s: enclosure level(0x%04x),"
2675 "connector name( %s)\n", ds,
2676 pcie_device->enclosure_level,
2677 pcie_device->connector_name);
/* nvme_mdts is in bytes; request queue limit is in 512-byte sectors */
2679 if (pcie_device->nvme_mdts)
2680 blk_queue_max_hw_sectors(sdev->request_queue,
2681 pcie_device->nvme_mdts/512);
2683 pcie_device_put(pcie_device);
2684 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2685 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2686 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2687 ** merged and can eliminate holes created during merging
2690 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2691 sdev->request_queue);
/* keep SG elements within the NVMe PRP page-size boundary */
2692 blk_queue_virt_boundary(sdev->request_queue,
2693 ioc->page_size - 1);
/* plain SAS/SATA end device handling */
2697 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2698 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2699 sas_device_priv_data->sas_target->sas_address,
2700 sas_device_priv_data->sas_target->port);
2702 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2704 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2705 __FILE__, __LINE__, __func__));
2709 sas_device->volume_handle = volume_handle;
2710 sas_device->volume_wwid = volume_wwid;
2711 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2712 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
/* SEP (enclosure services) devices must not be delay-removed */
2714 if (sas_device->device_info &
2715 MPI2_SAS_DEVICE_INFO_SEP) {
2716 sdev_printk(KERN_WARNING, sdev,
2717 "set ignore_delay_remove for handle(0x%04x)\n",
2718 sas_device_priv_data->sas_target->handle);
2719 sas_device_priv_data->ignore_delay_remove = 1;
2724 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2725 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2727 else if (sas_device->device_info &
2728 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2732 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2733 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2734 ds, handle, (unsigned long long)sas_device->sas_address,
2735 sas_device->phy, (unsigned long long)sas_device->device_name);
2737 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2739 sas_device_put(sas_device);
2740 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2743 _scsih_display_sata_capabilities(ioc, handle, sdev);
2746 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
/* SSP targets: read the port mode page and try to enable TLR */
2749 sas_read_port_mode_page(sdev);
2750 _scsih_enable_tlr(ioc, sdev);
2757 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2758 * @sdev: scsi device struct
2759 * @bdev: pointer to block device context
2760 * @capacity: device size (in 512 byte sectors)
2761 * @params: three element array to place output:
2762 * params[0] number of heads (max 255)
2763 * params[1] number of sectors (max 63)
2764 * params[2] number of cylinders
/*
 * Compute a legacy BIOS CHS geometry from the capacity: derive
 * cylinders as capacity / (heads * sectors), switching to an extended
 * geometry for disks of 1 GB or more (>= 0x200000 sectors).
 * NOTE(review): extract is missing the initial heads/sectors
 * assignments and the return; code kept byte-identical.
 */
2767 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2768 sector_t capacity, int params[])
2778 dummy = heads * sectors;
2779 cylinders = capacity;
/* sector_div: 64-bit-safe division, modifies cylinders in place */
2780 sector_div(cylinders, dummy);
2783 * Handle extended translation size for logical drives
2786 if ((ulong)capacity >= 0x200000) {
2789 dummy = heads * sectors;
2790 cylinders = capacity;
2791 sector_div(cylinders, dummy);
2796 params[1] = sectors;
2797 params[2] = cylinders;
2803 * _scsih_response_code - translation of device response code
2804 * @ioc: per adapter object
2805 * @response_code: response code returned by the device
/*
 * Decode a task-management response code into a human-readable string
 * and log it at warning level. Purely diagnostic; no return value is
 * visible in this extract.
 * NOTE(review): extract is missing break statements, the "overlapped
 * tag" case label, and any default arm; code kept byte-identical.
 */
2808 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2812 switch (response_code) {
2813 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2814 desc = "task management request completed";
2816 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2817 desc = "invalid frame";
2819 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2820 desc = "task management request not supported";
2822 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2823 desc = "task management request failed";
2825 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2826 desc = "task management request succeeded";
2828 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2829 desc = "invalid lun";
2832 desc = "overlapped tag attempted";
2834 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2835 desc = "task queued, however not sent to target";
2841 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2845 * _scsih_tm_done - tm completion routine
2846 * @ioc: per adapter object
2847 * @smid: system request message index
2848 * @msix_index: MSIX table index supplied by the OS
2849 * @reply: reply message frame(lower 32bit addr)
2852 * The callback handler when using scsih_issue_tm.
2854 * Return: 1 meaning mf should be freed from _base_interrupt
2855 * 0 means the mf is freed from this function.
/*
 * Completion callback for TM requests issued via tm_cmds: validate the
 * smid, copy the reply frame into tm_cmds.reply, update the status
 * bits, and wake the waiter via complete().
 * NOTE(review): extract is missing early returns for the guard checks
 * and the final return value; code kept byte-identical.
 */
2858 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2860 MPI2DefaultReply_t *mpi_reply;
/* ignore stray completions when no TM is pending or smid mismatches */
2862 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2864 if (ioc->tm_cmds.smid != smid)
2866 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2867 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* MsgLength is in 32-bit dwords, hence *4 for bytes */
2869 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2870 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2872 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2873 complete(&ioc->tm_cmds.done);
2878 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2879 * @ioc: per adapter object
2880 * @handle: device handle
2882 * During taskmangement request, we need to freeze the device queue.
/*
 * Walk every sdev on the host and, for those whose target matches
 * @handle, mark the target tm_busy and set ignore_loginfos so the
 * device queue stays frozen while the TM is outstanding.
 * NOTE(review): extract is missing braces/continue lines in the loop;
 * code kept byte-identical.
 */
2885 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2887 struct MPT3SAS_DEVICE *sas_device_priv_data;
2888 struct scsi_device *sdev;
2891 shost_for_each_device(sdev, ioc->shost) {
2894 sas_device_priv_data = sdev->hostdata;
2895 if (!sas_device_priv_data)
2897 if (sas_device_priv_data->sas_target->handle == handle) {
2898 sas_device_priv_data->sas_target->tm_busy = 1;
2900 ioc->ignore_loginfos = 1;
2906 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2907 * @ioc: per adapter object
2908 * @handle: device handle
2910 * During taskmangement request, we need to freeze the device queue.
/*
 * Inverse of mpt3sas_scsih_set_tm_flag(): clear tm_busy on the
 * matching target and drop ignore_loginfos once the TM finishes.
 * NOTE(review): extract is missing braces/continue lines in the loop;
 * code kept byte-identical.
 */
2913 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2915 struct MPT3SAS_DEVICE *sas_device_priv_data;
2916 struct scsi_device *sdev;
2919 shost_for_each_device(sdev, ioc->shost) {
2922 sas_device_priv_data = sdev->hostdata;
2923 if (!sas_device_priv_data)
2925 if (sas_device_priv_data->sas_target->handle == handle) {
2926 sas_device_priv_data->sas_target->tm_busy = 0;
2928 ioc->ignore_loginfos = 0;
2934 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2935 * @ioc: per adapter object
2936 * @channel: the channel assigned by the OS
2937 * @id: the id assigned by the OS
2939 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2940 * @smid_task: smid assigned to the task
2942 * Look whether TM has aborted the timed out SCSI command, if
2943 * TM has aborted the IO then return SUCCESS else return FAILED.
/*
 * Three smid ranges are distinguished: normal SCSI IO smids
 * (<= can_queue, checked via the scsi lookup helpers), the internal
 * scsih_cmds smid, and the ctl_cmds smid (checked via their status
 * bits).
 * NOTE(review): extract is missing the switch statement line, return
 * statements, and the default/fallthrough arms; kept byte-identical.
 */
2946 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2947 uint id, uint lun, u8 type, u16 smid_task)
2950 if (smid_task <= ioc->shost->can_queue) {
2952 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2953 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2957 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2958 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2959 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2966 } else if (smid_task == ioc->scsih_cmds.smid) {
2967 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2968 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2970 } else if (smid_task == ioc->ctl_cmds.smid) {
2971 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2972 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2980 * scsih_tm_post_processing - post processing of target & LUN reset
2981 * @ioc: per adapter object
2982 * @handle: device handle
2983 * @channel: the channel assigned by the OS
2984 * @id: the id assigned by the OS
2986 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2987 * @smid_task: smid assigned to the task
2989 * Post processing of target & LUN reset. Due to interrupt latency
2990 * issue it possible that interrupt for aborted IO might not be
2991 * received yet. So before returning failure status, poll the
2992 * reply descriptor pools for the reply of timed out SCSI command.
2993 * Return FAILED status if reply for timed out is not received
2994 * otherwise return SUCCESS.
/*
 * First map the TM status; if that fails, mask interrupts, drain all
 * reply descriptor queues synchronously, unmask, and re-check -- this
 * covers the window where the aborted IO's completion interrupt lags
 * behind the TM completion.
 * NOTE(review): extract is missing the early-success return and the
 * ioc_info wrapper line for the poll message; kept byte-identical.
 */
2997 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2998 uint channel, uint id, uint lun, u8 type, u16 smid_task)
3002 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3007 "Poll ReplyDescriptor queues for completion of"
3008 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3009 smid_task, type, handle);
3012 * Due to interrupt latency issues, driver may receive interrupt for
3013 * TM first and then for aborted SCSI IO command. So, poll all the
3014 * ReplyDescriptor pools before returning the FAILED status to SML.
3016 mpt3sas_base_mask_interrupts(ioc);
3017 mpt3sas_base_sync_reply_irqs(ioc, 1);
3018 mpt3sas_base_unmask_interrupts(ioc);
3020 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3024 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3025 * @ioc: per adapter struct
3026 * @handle: device handle
3027 * @channel: the channel assigned by the OS
3028 * @id: the id assigned by the OS
3030 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3031 * @smid_task: smid assigned to the task
3032 * @msix_task: MSIX table index supplied by the OS
3033 * @timeout: timeout in seconds
3034 * @tr_method: Target Reset Method
3037 * A generic API for sending task management requests to firmware.
3039 * The callback index is set inside `ioc->tm_cb_idx`.
3040 * The caller is responsible to check for outstanding commands.
3042 * Return: SUCCESS or FAILED.
/*
 * Sequence: sanity-check tm_cmds/host state, hard-reset if the IOC is
 * faulted or in coredump, build and fire a high-priority TM frame,
 * wait on tm_cmds.done with @timeout, then post-process per task type
 * (abort-task checks the request-pool DevHandle; target/LUN resets go
 * through scsih_tm_post_processing()).
 * NOTE(review): extract is missing many lines (switch header on the
 * task type, returns, labels, dfailprintk wrappers); kept
 * byte-identical. Caller must hold ioc->tm_cmds.mutex (asserted
 * below).
 */
3045 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3046 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3047 u8 timeout, u8 tr_method)
3049 Mpi2SCSITaskManagementRequest_t *mpi_request;
3050 Mpi2SCSITaskManagementReply_t *mpi_reply;
3051 Mpi25SCSIIORequest_t *request;
3057 lockdep_assert_held(&ioc->tm_cmds.mutex);
/* single-threaded TM channel: refuse if a TM is already in flight */
3059 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3060 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3064 if (ioc->shost_recovery || ioc->remove_host ||
3065 ioc->pci_error_recovery) {
3066 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
/* IOC unhealthy (doorbell busy, FAULT, COREDUMP) -> hard reset instead */
3070 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3071 if (ioc_state & MPI2_DOORBELL_USED) {
3072 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3073 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3074 return (!rc) ? SUCCESS : FAILED;
3077 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3078 mpt3sas_print_fault_code(ioc, ioc_state &
3079 MPI2_DOORBELL_DATA_MASK);
3080 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3081 return (!rc) ? SUCCESS : FAILED;
3082 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3083 MPI2_IOC_STATE_COREDUMP) {
3084 mpt3sas_print_coredump_info(ioc, ioc_state &
3085 MPI2_DOORBELL_DATA_MASK);
3086 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3087 return (!rc) ? SUCCESS : FAILED;
/* TM frames use the high-priority request queue */
3090 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3092 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3097 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3098 handle, type, smid_task, timeout, tr_method));
3099 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3100 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3101 ioc->tm_cmds.smid = smid;
3102 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3103 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3104 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3105 mpi_request->DevHandle = cpu_to_le16(handle);
3106 mpi_request->TaskType = type;
/* tr_method (e.g. PCIe protocol-level reset) only applies to aborts/queries */
3107 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3108 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3109 mpi_request->MsgFlags = tr_method;
3110 mpi_request->TaskMID = cpu_to_le16(smid_task);
3111 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
3112 mpt3sas_scsih_set_tm_flag(ioc, handle);
3113 init_completion(&ioc->tm_cmds.done);
3114 ioc->put_smid_hi_priority(ioc, smid, msix_task);
3115 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
/* timed out: decide whether to escalate to a hard reset */
3116 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3117 mpt3sas_check_cmd_timeout(ioc,
3118 ioc->tm_cmds.status, mpi_request,
3119 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3121 rc = mpt3sas_base_hard_reset_handler(ioc,
3123 rc = (!rc) ? SUCCESS : FAILED;
3128 /* sync IRQs in case those were busy during flush. */
3129 mpt3sas_base_sync_reply_irqs(ioc, 0);
3131 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3132 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3133 mpi_reply = ioc->tm_cmds.reply;
3135 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3136 le16_to_cpu(mpi_reply->IOCStatus),
3137 le32_to_cpu(mpi_reply->IOCLogInfo),
3138 le32_to_cpu(mpi_reply->TerminationCount)));
3139 if (ioc->logging_level & MPT_DEBUG_TM) {
3140 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3141 if (mpi_reply->IOCStatus)
3142 _debug_dump_mf(mpi_request,
3143 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3148 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3151 * If DevHandle filed in smid_task's entry of request pool
3152 * doesn't match with device handle on which this task abort
3153 * TM is received then it means that TM has successfully
3154 * aborted the timed out command. Since smid_task's entry in
3155 * request pool will be memset to zero once the timed out
3156 * command is returned to the SML. If the command is not
3157 * aborted then smid_task’s entry won’t be cleared and it
3158 * will have same DevHandle value on which this task abort TM
3159 * is received and driver will return the TM status as FAILED.
3161 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3162 if (le16_to_cpu(request->DevHandle) != handle)
3165 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3166 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3167 handle, timeout, tr_method, smid_task, msix_task);
3171 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3172 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3173 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3174 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3177 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
/* common exit: unfreeze the device queue and release tm_cmds */
3186 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3187 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
/*
 * mpt3sas_scsih_issue_locked_tm - mutex-protected wrapper around
 * mpt3sas_scsih_issue_tm(); takes ioc->tm_cmds.mutex for the duration
 * of the TM (the inner routine lockdep-asserts it is held).
 * NOTE(review): extract is missing the ret declaration and return;
 * code kept byte-identical.
 */
3191 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3192 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3193 u16 msix_task, u8 timeout, u8 tr_method)
3197 mutex_lock(&ioc->tm_cmds.mutex);
3198 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3199 smid_task, msix_task, timeout, tr_method);
3200 mutex_unlock(&ioc->tm_cmds.mutex);
3206 * _scsih_tm_display_info - displays info about the device
3207 * @ioc: per adapter struct
3208 * @scmd: pointer to scsi command object
3210 * Called by task management callback handlers.
/*
 * Diagnostic printout before a TM: dump the command, then print
 * identity info appropriate to the target class -- RAID volume (or
 * "WarpDrive" when IR messages are hidden), PCIe/NVMe device, or
 * SAS/SATA device (including hidden raid components, which report
 * their owning volume instead).
 * NOTE(review): extract is missing braces/else lines and some printk
 * arguments; code kept byte-identical.
 */
3213 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3215 struct scsi_target *starget = scmd->device->sdev_target;
3216 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3217 struct _sas_device *sas_device = NULL;
3218 struct _pcie_device *pcie_device = NULL;
3219 unsigned long flags;
3220 char *device_str = NULL;
3224 if (ioc->hide_ir_msg)
3225 device_str = "WarpDrive";
3227 device_str = "volume";
3229 scsi_print_command(scmd);
3230 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3231 starget_printk(KERN_INFO, starget,
3232 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3233 device_str, priv_target->handle,
3234 device_str, (unsigned long long)priv_target->sas_address);
3236 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3237 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3238 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3240 starget_printk(KERN_INFO, starget,
3241 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3242 pcie_device->handle,
3243 (unsigned long long)pcie_device->wwid,
3244 pcie_device->port_num);
3245 if (pcie_device->enclosure_handle != 0)
3246 starget_printk(KERN_INFO, starget,
3247 "enclosure logical id(0x%016llx), slot(%d)\n",
3248 (unsigned long long)
3249 pcie_device->enclosure_logical_id,
3251 if (pcie_device->connector_name[0] != '\0')
3252 starget_printk(KERN_INFO, starget,
3253 "enclosure level(0x%04x), connector name( %s)\n",
3254 pcie_device->enclosure_level,
3255 pcie_device->connector_name);
3256 pcie_device_put(pcie_device);
3258 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3261 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3262 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
/* hidden raid component: identify by owning volume, not the disk */
3264 if (priv_target->flags &
3265 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3266 starget_printk(KERN_INFO, starget,
3267 "volume handle(0x%04x), "
3268 "volume wwid(0x%016llx)\n",
3269 sas_device->volume_handle,
3270 (unsigned long long)sas_device->volume_wwid);
3272 starget_printk(KERN_INFO, starget,
3273 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3275 (unsigned long long)sas_device->sas_address,
3278 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3281 sas_device_put(sas_device);
3283 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3288 * scsih_abort - eh threads main abort routine
3289 * @scmd: pointer to scsi command object
3291 * Return: SUCCESS if command aborted else FAILED
/*
 * SCSI EH abort handler: validate the device/target still exists,
 * check the command is actually tracked by the driver (st->cb_idx),
 * refuse for hidden raid components and volumes, then issue an
 * ABORT_TASK TM (with the NVMe-specific timeout when applicable).
 * NOTE(review): extract is missing returns, 'goto out', the success
 * label, and the timeout default assignment; code kept byte-identical.
 */
3294 scsih_abort(struct scsi_cmnd *scmd)
3296 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3297 struct MPT3SAS_DEVICE *sas_device_priv_data;
3298 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3303 struct _pcie_device *pcie_device = NULL;
3304 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3305 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3306 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3307 (scmd->request->timeout / HZ) * 1000);
3308 _scsih_tm_display_info(ioc, scmd);
3310 sas_device_priv_data = scmd->device->hostdata;
3311 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3313 sdev_printk(KERN_INFO, scmd->device,
3314 "device been deleted! scmd(0x%p)\n", scmd);
3315 scmd->result = DID_NO_CONNECT << 16;
3316 scmd->scsi_done(scmd);
3321 /* check for completed command */
3322 if (st == NULL || st->cb_idx == 0xFF) {
3323 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3324 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3325 scmd->result = DID_RESET << 16;
3330 /* for hidden raid components and volumes this is not supported */
3331 if (sas_device_priv_data->sas_target->flags &
3332 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3333 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3334 scmd->result = DID_RESET << 16;
/* optional debug hook: halt FW on abort when configured */
3339 mpt3sas_halt_firmware(ioc);
3341 handle = sas_device_priv_data->sas_target->handle;
3342 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
/* NVMe (non-SCSI-over-PCIe) devices use a dedicated abort timeout */
3343 if (pcie_device && (!ioc->tm_custom_handling) &&
3344 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3345 timeout = ioc->nvme_abort_timeout;
3346 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3347 scmd->device->id, scmd->device->lun,
3348 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3349 st->smid, st->msix_io, timeout, 0);
3350 /* Command must be cleared after abort */
3351 if (r == SUCCESS && st->cb_idx != 0xFF)
3354 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3355 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3357 pcie_device_put(pcie_device);
3362 * scsih_dev_reset - eh threads main device reset routine
3363 * @scmd: pointer to scsi command object
3365 * Return: SUCCESS if command aborted else FAILED
/*
 * SCSI EH LUN reset handler: resolve the TM target handle (for hidden
 * raid components, the owning volume's handle), pick the reset
 * timeout/method (PCIe protocol-level reset for true NVMe devices,
 * link reset otherwise), issue LOGICAL_UNIT_RESET, and verify no
 * commands remain busy on the device.
 * NOTE(review): extract is missing returns, 'goto out', and the
 * sas_device NULL-check branch around line 3409; kept byte-identical.
 */
3368 scsih_dev_reset(struct scsi_cmnd *scmd)
3370 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3371 struct MPT3SAS_DEVICE *sas_device_priv_data;
3372 struct _sas_device *sas_device = NULL;
3373 struct _pcie_device *pcie_device = NULL;
3379 struct scsi_target *starget = scmd->device->sdev_target;
3380 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3382 sdev_printk(KERN_INFO, scmd->device,
3383 "attempting device reset! scmd(0x%p)\n", scmd);
3384 _scsih_tm_display_info(ioc, scmd);
3386 sas_device_priv_data = scmd->device->hostdata;
3387 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3389 sdev_printk(KERN_INFO, scmd->device,
3390 "device been deleted! scmd(0x%p)\n", scmd);
3391 scmd->result = DID_NO_CONNECT << 16;
3392 scmd->scsi_done(scmd);
3397 /* for hidden raid components obtain the volume_handle */
3399 if (sas_device_priv_data->sas_target->flags &
3400 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3401 sas_device = mpt3sas_get_sdev_from_target(ioc,
3404 handle = sas_device->volume_handle;
3406 handle = sas_device_priv_data->sas_target->handle;
3409 scmd->result = DID_RESET << 16;
3414 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3416 if (pcie_device && (!ioc->tm_custom_handling) &&
3417 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3418 tr_timeout = pcie_device->reset_timeout;
3419 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3421 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3423 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3424 scmd->device->id, scmd->device->lun,
3425 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3426 tr_timeout, tr_method);
3427 /* Check for busy commands after reset */
3428 if (r == SUCCESS && scsi_device_busy(scmd->device))
3431 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3432 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3435 sas_device_put(sas_device);
3437 pcie_device_put(pcie_device);
3443 * scsih_target_reset - eh threads main target reset routine
3444 * @scmd: pointer to scsi command object
3446 * Return: SUCCESS if command aborted else FAILED
/*
 * SCSI EH target reset handler: same handle/timeout/method resolution
 * as scsih_dev_reset(), but issues TARGET_RESET with lun 0 and checks
 * the whole target's busy count afterwards instead of the single sdev.
 * NOTE(review): extract is missing returns, 'goto out', and the
 * sas_device NULL-check branch around line 3489; kept byte-identical.
 */
3449 scsih_target_reset(struct scsi_cmnd *scmd)
3451 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3452 struct MPT3SAS_DEVICE *sas_device_priv_data;
3453 struct _sas_device *sas_device = NULL;
3454 struct _pcie_device *pcie_device = NULL;
3459 struct scsi_target *starget = scmd->device->sdev_target;
3460 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3462 starget_printk(KERN_INFO, starget,
3463 "attempting target reset! scmd(0x%p)\n", scmd);
3464 _scsih_tm_display_info(ioc, scmd);
3466 sas_device_priv_data = scmd->device->hostdata;
3467 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3469 starget_printk(KERN_INFO, starget,
3470 "target been deleted! scmd(0x%p)\n", scmd);
3471 scmd->result = DID_NO_CONNECT << 16;
3472 scmd->scsi_done(scmd);
3477 /* for hidden raid components obtain the volume_handle */
3479 if (sas_device_priv_data->sas_target->flags &
3480 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3481 sas_device = mpt3sas_get_sdev_from_target(ioc,
3484 handle = sas_device->volume_handle;
3486 handle = sas_device_priv_data->sas_target->handle;
3489 scmd->result = DID_RESET << 16;
3494 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3496 if (pcie_device && (!ioc->tm_custom_handling) &&
3497 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3498 tr_timeout = pcie_device->reset_timeout;
3499 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3501 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* target reset addresses the whole target: lun is 0, smid/msix 0 */
3502 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3503 scmd->device->id, 0,
3504 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3505 tr_timeout, tr_method);
3506 /* Check for busy commands after reset */
3507 if (r == SUCCESS && atomic_read(&starget->target_busy))
3510 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3511 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3514 sas_device_put(sas_device);
3516 pcie_device_put(pcie_device);
3522 * scsih_host_reset - eh threads main host reset routine
3523 * @scmd: pointer to scsi command object
3525 * Return: SUCCESS if command aborted else FAILED
/*
 * SCSI EH host reset handler: refuse while the driver is still
 * loading or being removed, otherwise perform a full hard reset
 * (FORCE_BIG_HAMMER) and map its result to SUCCESS/FAILED.
 * NOTE(review): extract is missing the FAILED assignment in the
 * blocking branch, 'goto out', and return; kept byte-identical.
 */
3528 scsih_host_reset(struct scsi_cmnd *scmd)
3530 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3533 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3534 scsi_print_command(scmd);
3536 if (ioc->is_driver_loading || ioc->remove_host) {
3537 ioc_info(ioc, "Blocking the host reset\n");
3542 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3543 r = (retval < 0) ? FAILED : SUCCESS;
3545 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3546 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3552 * _scsih_fw_event_add - insert and queue up fw_event
3553 * @ioc: per adapter object
3554 * @fw_event: object describing the event
3555 * Context: This function will acquire ioc->fw_event_lock.
3557 * This adds the firmware event object into link list, then queues it up to
3558 * be processed from user context.
/*
 * Takes two references on fw_event: one for its presence on
 * fw_event_list and one for the queued work item; each consumer drops
 * its own reference. No-op if the event thread does not exist.
 */
3561 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3563 unsigned long flags;
3565 if (ioc->firmware_event_thread == NULL)
3568 spin_lock_irqsave(&ioc->fw_event_lock, flags);
/* reference for the list linkage */
3569 fw_event_work_get(fw_event);
3570 INIT_LIST_HEAD(&fw_event->list);
3571 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3572 INIT_WORK(&fw_event->work, _firmware_event_work);
/* reference for the workqueue item */
3573 fw_event_work_get(fw_event);
3574 queue_work(ioc->firmware_event_thread, &fw_event->work);
3575 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3579 * _scsih_fw_event_del_from_list - delete fw_event from the list
3580 * @ioc: per adapter object
3581 * @fw_event: object describing the event
3582 * Context: This function will acquire ioc->fw_event_lock.
3584 * If the fw_event is on the fw_event_list, remove it and do a put.
/*
 * Counterpart of the list-side reference taken in _scsih_fw_event_add:
 * unlink under fw_event_lock and drop exactly that one reference.
 * NOTE(review): extract is missing the second parameter line of the
 * signature (the '*fw_event)' continuation); code kept byte-identical.
 */
3587 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3590 unsigned long flags;
3592 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3593 if (!list_empty(&fw_event->list)) {
3594 list_del_init(&fw_event->list);
3595 fw_event_work_put(fw_event);
3597 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3602 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3603 * @ioc: per adapter object
3604 * @event_data: trigger event data
3607 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3608 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3610 struct fw_event_work *fw_event;
/* Trigger processing is deferred until driver load completes. */
3613 if (ioc->is_driver_loading)
3615 sz = sizeof(*event_data);
3616 fw_event = alloc_fw_event_work(sz);
3619 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3620 fw_event->ioc = ioc;
/* Copy the caller's payload; fw_event owns its own copy from here on. */
3621 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3622 _scsih_fw_event_add(ioc, fw_event);
/* Drop the allocation reference; the queue/work refs keep it alive. */
3623 fw_event_work_put(fw_event);
3627 * _scsih_error_recovery_delete_devices - remove devices not responding
3628 * @ioc: per adapter object
3631 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3633 struct fw_event_work *fw_event;
/* Zero-sized payload: the event type alone identifies the action. */
3635 fw_event = alloc_fw_event_work(0)
3638 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3639 fw_event->ioc = ioc;
3640 _scsih_fw_event_add(ioc, fw_event);
3641 fw_event_work_put(fw_event);
3645 * mpt3sas_port_enable_complete - port enable completed (fake event)
3646 * @ioc: per adapter object
3649 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3651 struct fw_event_work *fw_event;
/* Driver-generated ("fake") event, queued so port-enable completion is
 * handled from the same user-context worker as real firmware events. */
3653 fw_event = alloc_fw_event_work(0);
3656 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3657 fw_event->ioc = ioc;
3658 _scsih_fw_event_add(ioc, fw_event);
3659 fw_event_work_put(fw_event);
/* dequeue_next_fw_event - pop the oldest event off ioc->fw_event_list.
 * Returns NULL when the list is empty; the caller inherits the list's
 * reference on the returned fw_event (list_del_init without a put). */
3662 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3664 unsigned long flags;
3665 struct fw_event_work *fw_event = NULL;
3667 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3668 if (!list_empty(&ioc->fw_event_list)) {
3669 fw_event = list_first_entry(&ioc->fw_event_list,
3670 struct fw_event_work, list);
3671 list_del_init(&fw_event->list);
3673 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3679 * _scsih_fw_event_cleanup_queue - cleanup event queue
3680 * @ioc: per adapter object
3682 * Walk the firmware event queue, either killing timers, or waiting
3683 * for outstanding events to complete
3685 * Context: task, can sleep
3688 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3690 struct fw_event_work *fw_event;
/* Nothing queued and nothing running, or no worker thread: done. */
3692 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3693 !ioc->firmware_event_thread)
3696 * Set current running event as ignore, so that
3697 * current running event will exit quickly.
3698 * As diag reset has occurred it is of no use
3699 * to process remaining stale event data entries.
3701 if (ioc->shost_recovery && ioc->current_event)
3702 ioc->current_event->ignore = 1;
3704 ioc->fw_events_cleanup = 1;
/* Drain queued events first, then deal with the in-flight one. */
3705 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3706 (fw_event = ioc->current_event)) {
3709 * Don't call cancel_work_sync() for current_event
3710 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3711 * otherwise we may observe deadlock if current
3712 * hard reset issued as part of processing the current_event.
3714 * Orginal logic of cleaning the current_event is added
3715 * for handling the back to back host reset issued by the user.
3716 * i.e. during back to back host reset, driver use to process
3717 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
3718 * event back to back and this made the drives to unregister
3719 * the devices from SML.
3722 if (fw_event == ioc->current_event &&
3723 ioc->current_event->event !=
3724 MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST placeholder comment removed
3724 MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3725 ioc->current_event = NULL;
3730 * Driver has to clear ioc->start_scan flag when
3731 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
3732 * otherwise scsi_scan_host() API waits for the
3733 * 5 minute timer to expire. If we exit from
3734 * scsi_scan_host() early then we can issue the
3735 * new port enable request as part of current diag reset.
3737 if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3738 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3739 ioc->start_scan = 0;
3743 * Wait on the fw_event to complete. If this returns 1, then
3744 * the event was never executed, and we need a put for the
3745 * reference the work had on the fw_event.
3747 * If it did execute, we wait for it to finish, and the put will
3748 * happen from _firmware_event_work()
3750 if (cancel_work_sync(&fw_event->work))
3751 fw_event_work_put(fw_event);
/* Drop the list reference transferred by dequeue_next_fw_event(). */
3753 fw_event_work_put(fw_event);
3755 ioc->fw_events_cleanup = 0;
3759 * _scsih_internal_device_block - block the sdev device
3760 * @sdev: per device object
3761 * @sas_device_priv_data : per device driver private data
3763 * make sure device is blocked without error, if not
3767 _scsih_internal_device_block(struct scsi_device *sdev,
3768 struct MPT3SAS_DEVICE *sas_device_priv_data)
3772 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3773 sas_device_priv_data->sas_target->handle);
/* Record our own bookkeeping flag before asking the midlayer. */
3774 sas_device_priv_data->block = 1;
/* _nowait variant: safe to call from non-sleeping context. */
3776 r = scsi_internal_device_block_nowait(sdev);
3778 sdev_printk(KERN_WARNING, sdev,
3779 "device_block failed with return(%d) for handle(0x%04x)\n",
3780 r, sas_device_priv_data->sas_target->handle);
3784 * _scsih_internal_device_unblock - unblock the sdev device
3785 * @sdev: per device object
3786 * @sas_device_priv_data : per device driver private data
3787 * make sure device is unblocked without error, if not retry
3788 * by blocking and then unblocking
3792 _scsih_internal_device_unblock(struct scsi_device *sdev,
3793 struct MPT3SAS_DEVICE *sas_device_priv_data)
3797 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3798 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3799 sas_device_priv_data->block = 0;
3800 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3802 /* The device has been set to SDEV_RUNNING by SD layer during
3803 * device addition but the request queue is still stopped by
3804 * our earlier block call. We need to perform a block again
3805 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3807 sdev_printk(KERN_WARNING, sdev,
3808 "device_unblock failed with return(%d) for handle(0x%04x) "
3809 "performing a block followed by an unblock\n",
3810 r, sas_device_priv_data->sas_target->handle);
/* Recovery path: force a block/unblock cycle to resync state. */
3811 sas_device_priv_data->block = 1;
3812 r = scsi_internal_device_block_nowait(sdev);
3814 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3815 "failed with return(%d) for handle(0x%04x)\n",
3816 r, sas_device_priv_data->sas_target->handle);
3818 sas_device_priv_data->block = 0;
3819 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3821 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3822 " failed with return(%d) for handle(0x%04x)\n",
3823 r, sas_device_priv_data->sas_target->handle);
3828 * _scsih_ublock_io_all_device - unblock every device
3829 * @ioc: per adapter object
3831 * change the device state from block to running
3834 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3836 struct MPT3SAS_DEVICE *sas_device_priv_data;
3837 struct scsi_device *sdev;
3839 shost_for_each_device(sdev, ioc->shost) {
3840 sas_device_priv_data = sdev->hostdata;
/* Skip devices with no driver data or that were never blocked. */
3841 if (!sas_device_priv_data)
3843 if (!sas_device_priv_data->block)
3846 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3847 "device_running, handle(0x%04x)\n",
3848 sas_device_priv_data->sas_target->handle));
3849 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3855 * _scsih_ublock_io_device - prepare device to be deleted
3856 * @ioc: per adapter object
3857 * @sas_address: sas address
3858 * @port: hba port entry
3860 * unblock then put device in offline state
3863 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3864 u64 sas_address, struct hba_port *port)
3866 struct MPT3SAS_DEVICE *sas_device_priv_data;
3867 struct scsi_device *sdev;
3869 shost_for_each_device(sdev, ioc->shost) {
3870 sas_device_priv_data = sdev->hostdata;
3871 if (!sas_device_priv_data)
/* Match on both SAS address and HBA port; multiple ports may see
 * the same SAS address. */
3873 if (sas_device_priv_data->sas_target->sas_address
3876 if (sas_device_priv_data->sas_target->port != port)
3878 if (sas_device_priv_data->block)
3879 _scsih_internal_device_unblock(sdev,
3880 sas_device_priv_data);
3885 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3886 * @ioc: per adapter object
3888 * During device pull we need to appropriately set the sdev state.
3891 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3893 struct MPT3SAS_DEVICE *sas_device_priv_data;
3894 struct scsi_device *sdev;
3896 shost_for_each_device(sdev, ioc->shost) {
3897 sas_device_priv_data = sdev->hostdata;
3898 if (!sas_device_priv_data)
/* Already blocked: nothing to do. */
3900 if (sas_device_priv_data->block)
/* Enclosure (SES) devices flagged ignore_delay_remove are left
 * unblocked so enclosure management keeps working. */
3902 if (sas_device_priv_data->ignore_delay_remove) {
3903 sdev_printk(KERN_INFO, sdev,
3904 "%s skip device_block for SES handle(0x%04x)\n",
3905 __func__, sas_device_priv_data->sas_target->handle);
3908 _scsih_internal_device_block(sdev, sas_device_priv_data);
3913 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3914 * @ioc: per adapter object
3915 * @handle: device handle
3917 * During device pull we need to appropriately set the sdev state.
3920 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3922 struct MPT3SAS_DEVICE *sas_device_priv_data;
3923 struct scsi_device *sdev;
3924 struct _sas_device *sas_device;
/* Takes a reference on sas_device; released at the bottom. */
3926 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3928 shost_for_each_device(sdev, ioc->shost) {
3929 sas_device_priv_data = sdev->hostdata;
3930 if (!sas_device_priv_data)
3932 if (sas_device_priv_data->sas_target->handle != handle)
3934 if (sas_device_priv_data->block)
/* Don't block while an rphy add for this device is still pending. */
3936 if (sas_device && sas_device->pend_sas_rphy_add)
3938 if (sas_device_priv_data->ignore_delay_remove) {
3939 sdev_printk(KERN_INFO, sdev,
3940 "%s skip device_block for SES handle(0x%04x)\n",
3941 __func__, sas_device_priv_data->sas_target->handle);
3944 _scsih_internal_device_block(sdev, sas_device_priv_data);
3948 sas_device_put(sas_device);
3952 * _scsih_block_io_to_children_attached_to_ex
3953 * @ioc: per adapter object
3954 * @sas_expander: the sas_device object
3956 * This routine set sdev state to SDEV_BLOCK for all devices
3957 * attached to this expander. This function called when expander is
3961 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3962 struct _sas_node *sas_expander)
3964 struct _sas_port *mpt3sas_port;
3965 struct _sas_device *sas_device;
3966 struct _sas_node *expander_sibling;
3967 unsigned long flags;
/* Pass 1: mark end devices on this expander's ports in the
 * blocking_handles bitmap (the caller performs the actual blocking). */
3972 list_for_each_entry(mpt3sas_port,
3973 &sas_expander->sas_port_list, port_list) {
3974 if (mpt3sas_port->remote_identify.device_type ==
3976 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3977 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3978 mpt3sas_port->remote_identify.sas_address,
3979 mpt3sas_port->hba_port);
3981 set_bit(sas_device->handle,
3982 ioc->blocking_handles);
3983 sas_device_put(sas_device);
3985 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* Pass 2: recurse into downstream (edge/fanout) expanders. */
3989 list_for_each_entry(mpt3sas_port,
3990 &sas_expander->sas_port_list, port_list) {
3992 if (mpt3sas_port->remote_identify.device_type ==
3993 SAS_EDGE_EXPANDER_DEVICE ||
3994 mpt3sas_port->remote_identify.device_type ==
3995 SAS_FANOUT_EXPANDER_DEVICE) {
3997 mpt3sas_scsih_expander_find_by_sas_address(
3998 ioc, mpt3sas_port->remote_identify.sas_address,
3999 mpt3sas_port->hba_port);
4000 _scsih_block_io_to_children_attached_to_ex(ioc,
4007 * _scsih_block_io_to_children_attached_directly
4008 * @ioc: per adapter object
4009 * @event_data: topology change event data
4011 * This routine set sdev state to SDEV_BLOCK for all devices
4012 * direct attached during device pull.
4015 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4016 Mpi2EventDataSasTopologyChangeList_t *event_data)
4022 for (i = 0; i < event_data->NumEntries; i++) {
4023 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
/* Only the reason-code bits of PhyStatus are relevant here. */
4026 reason_code = event_data->PHY[i].PhyStatus &
4027 MPI2_EVENT_SAS_TOPO_RC_MASK;
4028 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4029 _scsih_block_io_device(ioc, handle);
4034 * _scsih_block_io_to_pcie_children_attached_directly
4035 * @ioc: per adapter object
4036 * @event_data: topology change event data
4038 * This routine set sdev state to SDEV_BLOCK for all devices
4039 * direct attached during device pull/reconnect.
4042 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4043 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4049 for (i = 0; i < event_data->NumEntries; i++) {
4051 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
/* PCIe PortStatus is the reason code directly (no masking needed). */
4054 reason_code = event_data->PortEntry[i].PortStatus;
4056 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4057 _scsih_block_io_device(ioc, handle);
4061 * _scsih_tm_tr_send - send task management request
4062 * @ioc: per adapter object
4063 * @handle: device handle
4064 * Context: interrupt time.
4066 * This code is to initiate the device removal handshake protocol
4067 * with controller firmware. This function will issue target reset
4068 * using high priority request queue. It will send a sas iounit
4069 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4071 * This is designed to send muliple task management request at the same
4072 * time to the fifo. If the fifo is full, we will append the request,
4073 * and process it in a future completion.
4076 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4078 Mpi2SCSITaskManagementRequest_t *mpi_request;
4080 struct _sas_device *sas_device = NULL;
4081 struct _pcie_device *pcie_device = NULL;
4082 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4083 u64 sas_address = 0;
4084 unsigned long flags;
4085 struct _tr_list *delayed_tr;
4088 struct hba_port *port = NULL;
/* Bail out if the IOC is unusable (PCI error recovery or not in the
 * OPERATIONAL state). */
4090 if (ioc->pci_error_recovery) {
4092 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4096 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4097 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4099 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4104 /* if PD, then return */
4105 if (test_bit(handle, ioc->pd_handles))
4108 clear_bit(handle, ioc->pend_os_device_add);
/* Look up the handle as a SAS device first, then as a PCIe device;
 * whichever matches supplies sas_target_priv_data and the address. */
4110 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4111 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4112 if (sas_device && sas_device->starget &&
4113 sas_device->starget->hostdata) {
4114 sas_target_priv_data = sas_device->starget->hostdata;
4115 sas_target_priv_data->deleted = 1;
4116 sas_address = sas_device->sas_address;
4117 port = sas_device->port;
4119 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4121 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4122 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4123 if (pcie_device && pcie_device->starget &&
4124 pcie_device->starget->hostdata) {
4125 sas_target_priv_data = pcie_device->starget->hostdata;
4126 sas_target_priv_data->deleted = 1;
4127 sas_address = pcie_device->wwid;
4129 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* NVMe (non-SCSI) PCIe devices need a protocol-level reset method
 * unless custom TM handling is enabled. */
4130 if (pcie_device && (!ioc->tm_custom_handling) &&
4131 (!(mpt3sas_scsih_is_pcie_scsi_device(
4132 pcie_device->device_info))))
4134 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4136 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4138 if (sas_target_priv_data) {
4140 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4141 handle, (u64)sas_address));
4143 if (sas_device->enclosure_handle != 0)
4145 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4146 (u64)sas_device->enclosure_logical_id,
4148 if (sas_device->connector_name[0] != '\0')
4150 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4151 sas_device->enclosure_level,
4152 sas_device->connector_name));
4153 } else if (pcie_device) {
4154 if (pcie_device->enclosure_handle != 0)
4156 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4157 (u64)pcie_device->enclosure_logical_id,
4158 pcie_device->slot));
4159 if (pcie_device->connector_name[0] != '\0')
4161 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4162 pcie_device->enclosure_level,
4163 pcie_device->connector_name));
/* Unblock any blocked I/O so outstanding commands can complete,
 * then invalidate the handle in the target private data. */
4165 _scsih_ublock_io_device(ioc, sas_address, port);
4166 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
/* High-priority smid pool exhausted: defer this target reset onto
 * delayed_tr_list; it is replayed from a future TM completion. */
4169 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4171 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4174 INIT_LIST_HEAD(&delayed_tr->list);
4175 delayed_tr->handle = handle;
4176 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4178 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4184 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4185 handle, smid, ioc->tm_tr_cb_idx));
4186 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4187 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4188 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4189 mpi_request->DevHandle = cpu_to_le16(handle);
4190 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4191 mpi_request->MsgFlags = tr_method;
4192 set_bit(handle, ioc->device_remove_in_progress);
4193 ioc->put_smid_hi_priority(ioc, smid, 0);
4194 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
/* Drop the lookup references taken above. */
4198 sas_device_put(sas_device);
4200 pcie_device_put(pcie_device);
4204 * _scsih_tm_tr_complete -
4205 * @ioc: per adapter object
4206 * @smid: system request message index
4207 * @msix_index: MSIX table index supplied by the OS
4208 * @reply: reply message frame(lower 32bit addr)
4209 * Context: interrupt time.
4211 * This is the target reset completion routine.
4212 * This code is part of the code to initiate the device removal
4213 * handshake protocol with controller firmware.
4214 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4216 * Return: 1 meaning mf should be freed from _base_interrupt
4217 * 0 means the mf is freed from this function.
4220 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4224 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4225 Mpi2SCSITaskManagementReply_t *mpi_reply =
4226 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4227 Mpi2SasIoUnitControlRequest_t *mpi_request;
4230 struct _sc_list *delayed_sc;
/* Abort early if the IOC is unusable. */
4232 if (ioc->pci_error_recovery) {
4234 ioc_info(ioc, "%s: host in pci error recovery\n",
4238 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4239 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4241 ioc_info(ioc, "%s: host is not operational\n",
4245 if (unlikely(!mpi_reply)) {
4246 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4247 __FILE__, __LINE__, __func__);
4250 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4251 handle = le16_to_cpu(mpi_request_tm->DevHandle);
/* Sanity: the reply's DevHandle must match the request's. */
4252 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4254 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4256 le16_to_cpu(mpi_reply->DevHandle), smid));
4260 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4262 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4263 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4264 le32_to_cpu(mpi_reply->IOCLogInfo),
4265 le32_to_cpu(mpi_reply->TerminationCount)));
/* No free smid for the follow-up SAS IO unit control request:
 * queue it on delayed_sc_list to be sent later. */
4267 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4268 if (!smid_sas_ctrl) {
4269 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4271 return _scsih_check_for_pending_tm(ioc, smid);
4272 INIT_LIST_HEAD(&delayed_sc->list);
4273 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4274 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4276 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4278 return _scsih_check_for_pending_tm(ioc, smid);
4282 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4283 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
/* Second step of the removal handshake: REMOVE_DEVICE op. */
4284 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4285 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4286 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4287 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4288 mpi_request->DevHandle = mpi_request_tm->DevHandle;
4289 ioc->put_smid_default(ioc, smid_sas_ctrl);
4291 return _scsih_check_for_pending_tm(ioc, smid);
4294 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4295 * issue to IOC or not.
4296 * @ioc: per adapter object
4297 * @scmd: pointer to scsi command object
4299 * Returns true if scmd can be issued to IOC otherwise returns false.
4301 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4302 struct scsi_cmnd *scmd)
/* Never issue commands during PCI error recovery. */
4305 if (ioc->pci_error_recovery)
/* MPI2 (SAS2) generation: any command is refused once removal starts. */
4308 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4309 if (ioc->remove_host)
/* Later generations: during host removal, selected commands (e.g.
 * SYNCHRONIZE CACHE) are still allowed through. */
4315 if (ioc->remove_host) {
4317 switch (scmd->cmnd[0]) {
4318 case SYNCHRONIZE_CACHE:
4330 * _scsih_sas_control_complete - completion routine
4331 * @ioc: per adapter object
4332 * @smid: system request message index
4333 * @msix_index: MSIX table index supplied by the OS
4334 * @reply: reply message frame(lower 32bit addr)
4335 * Context: interrupt time.
4337 * This is the sas iounit control completion routine.
4338 * This code is part of the code to initiate the device removal
4339 * handshake protocol with controller firmware.
4341 * Return: 1 meaning mf should be freed from _base_interrupt
4342 * 0 means the mf is freed from this function.
4345 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4346 u8 msix_index, u32 reply)
4348 Mpi2SasIoUnitControlReply_t *mpi_reply =
4349 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4351 if (likely(mpi_reply)) {
4353 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4354 le16_to_cpu(mpi_reply->DevHandle), smid,
4355 le16_to_cpu(mpi_reply->IOCStatus),
4356 le32_to_cpu(mpi_reply->IOCLogInfo)));
/* On success the device removal handshake is finished; clear the
 * handle's in-progress bit. */
4357 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4358 MPI2_IOCSTATUS_SUCCESS) {
4359 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4360 ioc->device_remove_in_progress);
4363 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4364 __FILE__, __LINE__, __func__);
/* Replay any internal commands that were deferred for lack of a smid. */
4366 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4370 * _scsih_tm_tr_volume_send - send target reset request for volumes
4371 * @ioc: per adapter object
4372 * @handle: device handle
4373 * Context: interrupt time.
4375 * This is designed to send muliple task management request at the same
4376 * time to the fifo. If the fifo is full, we will append the request,
4377 * and process it in a future completion.
4380 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4382 Mpi2SCSITaskManagementRequest_t *mpi_request;
4384 struct _tr_list *delayed_tr;
4386 if (ioc->pci_error_recovery) {
4388 ioc_info(ioc, "%s: host reset in progress!\n",
/* No high-priority smid available: defer onto the volume-specific
 * delayed list and replay from a future TM completion. */
4393 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4395 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4398 INIT_LIST_HEAD(&delayed_tr->list);
4399 delayed_tr->handle = handle;
4400 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4402 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4408 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4409 handle, smid, ioc->tm_tr_volume_cb_idx));
4410 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4411 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4412 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4413 mpi_request->DevHandle = cpu_to_le16(handle);
4414 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4415 ioc->put_smid_hi_priority(ioc, smid, 0);
4419 * _scsih_tm_volume_tr_complete - target reset completion
4420 * @ioc: per adapter object
4421 * @smid: system request message index
4422 * @msix_index: MSIX table index supplied by the OS
4423 * @reply: reply message frame(lower 32bit addr)
4424 * Context: interrupt time.
4426 * Return: 1 meaning mf should be freed from _base_interrupt
4427 * 0 means the mf is freed from this function.
4430 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4431 u8 msix_index, u32 reply)
4434 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4435 Mpi2SCSITaskManagementReply_t *mpi_reply =
4436 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4438 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4440 ioc_info(ioc, "%s: host reset in progress!\n",
4444 if (unlikely(!mpi_reply)) {
4445 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4446 __FILE__, __LINE__, __func__);
4450 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4451 handle = le16_to_cpu(mpi_request_tm->DevHandle);
/* Sanity: reply DevHandle must match the original request. */
4452 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4454 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4455 handle, le16_to_cpu(mpi_reply->DevHandle),
4461 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4462 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4463 le32_to_cpu(mpi_reply->IOCLogInfo),
4464 le32_to_cpu(mpi_reply->TerminationCount)));
/* Reuse this smid for any pending (delayed) target resets. */
4466 return _scsih_check_for_pending_tm(ioc, smid);
4470 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4471 * @ioc: per adapter object
4472 * @smid: system request message index
4474 * @event_context: used to track events uniquely
4476 * Context - processed in interrupt context.
4479 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4482 Mpi2EventAckRequest_t *ack_request;
/* Index into internal_lookup for this internal-command smid. */
4483 int i = smid - ioc->internal_smid;
4484 unsigned long flags;
4486 /* Without releasing the smid just update the
4487 * call back index and reuse the same smid for
4488 * processing this delayed request
4490 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4491 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4492 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4495 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4496 le16_to_cpu(event), smid, ioc->base_cb_idx));
4497 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4498 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4499 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
/* NOTE(review): event/event_context appear to be kept in wire (LE)
 * byte order by the caller — confirm against delayed_event_ack_list
 * producers before changing endianness handling here. */
4500 ack_request->Event = event;
4501 ack_request->EventContext = event_context;
4502 ack_request->VF_ID = 0; /* TODO */
4503 ack_request->VP_ID = 0;
4504 ioc->put_smid_default(ioc, smid);
4508 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4509 * sas_io_unit_ctrl messages
4510 * @ioc: per adapter object
4511 * @smid: system request message index
4512 * @handle: device handle
4514 * Context - processed in interrupt context.
4517 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4518 u16 smid, u16 handle)
4520 Mpi2SasIoUnitControlRequest_t *mpi_request;
4522 int i = smid - ioc->internal_smid;
4523 unsigned long flags;
/* Bail out if the host is going away or the IOC is unusable. */
4525 if (ioc->remove_host) {
4527 ioc_info(ioc, "%s: host has been removed\n",
4530 } else if (ioc->pci_error_recovery) {
4532 ioc_info(ioc, "%s: host in pci error recovery\n",
4536 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4537 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4539 ioc_info(ioc, "%s: host is not operational\n",
4544 /* Without releasing the smid just update the
4545 * call back index and reuse the same smid for
4546 * processing this delayed request
4548 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4549 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4550 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4553 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4554 handle, smid, ioc->tm_sas_control_cb_idx));
/* Issue the deferred REMOVE_DEVICE operation for this handle. */
4555 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4556 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4557 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4558 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4559 mpi_request->DevHandle = cpu_to_le16(handle);
4560 ioc->put_smid_default(ioc, smid);
4564 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4565 * @ioc: per adapter object
4566 * @smid: system request message index
4568 * Context: Executed in interrupt context
4570 * This will check delayed internal messages list, and process the
4573 * Return: 1 meaning mf should be freed from _base_interrupt
4574 * 0 means the mf is freed from this function.
4577 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4579 struct _sc_list *delayed_sc;
4580 struct _event_ack_list *delayed_event_ack;
/* Delayed Event ACKs take priority; the freed smid is reused. */
4582 if (!list_empty(&ioc->delayed_event_ack_list)) {
4583 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4584 struct _event_ack_list, list);
4585 _scsih_issue_delayed_event_ack(ioc, smid,
4586 delayed_event_ack->Event, delayed_event_ack->EventContext);
4587 list_del(&delayed_event_ack->list);
4588 kfree(delayed_event_ack);
/* Next, deferred SAS IO unit control (device removal) requests. */
4592 if (!list_empty(&ioc->delayed_sc_list)) {
4593 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4594 struct _sc_list, list);
4595 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4596 delayed_sc->handle);
4597 list_del(&delayed_sc->list);
4605 * _scsih_check_for_pending_tm - check for pending task management
4606 * @ioc: per adapter object
4607 * @smid: system request message index
4609 * This will check delayed target reset list, and feed the
4612 * Return: 1 meaning mf should be freed from _base_interrupt
4613 * 0 means the mf is freed from this function.
4616 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4618 struct _tr_list *delayed_tr;
/* Volume target resets are replayed first; the completing smid is
 * freed and the send path allocates a fresh one. */
4620 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4621 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4622 struct _tr_list, list);
4623 mpt3sas_base_free_smid(ioc, smid);
4624 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4625 list_del(&delayed_tr->list);
/* Then deferred device target resets. */
4630 if (!list_empty(&ioc->delayed_tr_list)) {
4631 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4632 struct _tr_list, list);
4633 mpt3sas_base_free_smid(ioc, smid);
4634 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4635 list_del(&delayed_tr->list);
4644 * _scsih_check_topo_delete_events - sanity check on topo events
4645 * @ioc: per adapter object
4646 * @event_data: the event data payload
4648 * This routine added to better handle cable breaker.
4650 * This handles the case where driver receives multiple expander
4651 * add and delete events in a single shot. When there is a delete event
4652 * the routine will void any pending add events waiting in the event queue.
4655 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4656 Mpi2EventDataSasTopologyChangeList_t *event_data)
4658 struct fw_event_work *fw_event;
4659 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4660 u16 expander_handle;
4661 struct _sas_node *sas_expander;
4662 unsigned long flags;
/* Kick off the removal handshake for every PHY whose target has
 * stopped responding. */
4666 for (i = 0 ; i < event_data->NumEntries; i++) {
4667 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4670 reason_code = event_data->PHY[i].PhyStatus &
4671 MPI2_EVENT_SAS_TOPO_RC_MASK;
4672 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4673 _scsih_tm_tr_send(ioc, handle);
/* ExpanderDevHandle below the HBA's phy count means the change is
 * direct-attached rather than behind an expander. */
4676 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4677 if (expander_handle < ioc->sas_hba.num_phys) {
4678 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4681 if (event_data->ExpStatus ==
4682 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4683 /* put expander attached devices into blocking state */
4684 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4685 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4687 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4688 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* Drain the blocking_handles bitmap populated above, blocking each
 * marked handle in turn. */
4690 handle = find_first_bit(ioc->blocking_handles,
4691 ioc->facts.MaxDevHandle);
4692 if (handle < ioc->facts.MaxDevHandle)
4693 _scsih_block_io_device(ioc, handle);
4694 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4695 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4696 _scsih_block_io_to_children_attached_directly(ioc, event_data);
/* Only a NOT_RESPONDING expander voids pending add events below. */
4698 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4701 /* mark ignore flag for pending events */
4702 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4703 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4704 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4707 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4708 fw_event->event_data;
4709 if (local_event_data->ExpStatus ==
4710 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4711 local_event_data->ExpStatus ==
4712 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4713 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4716 ioc_info(ioc, "setting ignoring flag\n"));
4717 fw_event->ignore = 1;
4721 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4725 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4727 * @ioc: per adapter object
4728 * @event_data: the event data payload
4730 * This handles the case where driver receives multiple switch
4731 * or device add and delete events in a single shot. When there
4732 * is a delete event the routine will void any pending add
4733 * events waiting in the event queue.
4736 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4737 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4739 struct fw_event_work *fw_event;
4740 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4741 unsigned long flags;
4743 u16 handle, switch_handle;
/* Pass 1: queue a target reset for every port entry whose status says
 * the attached NVMe device stopped responding.
 */
4745 for (i = 0; i < event_data->NumEntries; i++) {
4747 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4750 reason_code = event_data->PortEntry[i].PortStatus;
4751 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4752 _scsih_tm_tr_send(ioc, handle);
/* A zero switch handle means the devices are attached directly to the
 * controller (no PCIe switch in between).
 */
4755 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4756 if (!switch_handle) {
4757 _scsih_block_io_to_pcie_children_attached_directly(
4761 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4762 if ((event_data->SwitchStatus
4763 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4764 (event_data->SwitchStatus ==
4765 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4766 _scsih_block_io_to_pcie_children_attached_directly(
/* NOTE(review): SAS topology ES_* constants are compared against the
 * PCIe SwitchStatus here and below — presumably the numeric values are
 * shared between the SAS and PCIe event encodings; confirm against the
 * MPI 2.6 headers.
 */
4769 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4772 /* mark ignore flag for pending events */
4773 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4774 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4775 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4779 (Mpi26EventDataPCIeTopologyChangeList_t *)
4780 fw_event->event_data;
4781 if (local_event_data->SwitchStatus ==
4782 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4783 local_event_data->SwitchStatus ==
4784 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
/* Pending add event for the switch that just went away: skip it. */
4785 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4788 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4789 fw_event->ignore = 1;
4793 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4797 * _scsih_set_volume_delete_flag - setting volume delete flag
4798 * @ioc: per adapter object
4799 * @handle: device handle of the RAID volume
4801 * This returns nothing.
4804 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4806 struct _raid_device *raid_device;
4807 struct MPT3SAS_TARGET *sas_target_priv_data;
4808 unsigned long flags;
/* Look the volume up under raid_device_lock; only mark it deleted when
 * a scsi_target with private data is actually attached.
 */
4810 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4811 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4812 if (raid_device && raid_device->starget &&
4813 raid_device->starget->hostdata) {
4814 sas_target_priv_data =
4815 raid_device->starget->hostdata;
/* deleted=1 makes subsequent I/O to this target fail fast. */
4816 sas_target_priv_data->deleted = 1;
4818 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4819 handle, (u64)raid_device->wwid));
4821 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4825 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4826 * @handle: input handle
4827 * @a: handle for volume a
4828 * @b: handle for volume b
4830 * IR firmware only supports two raid volumes. The purpose of this
4831 * routine is to set the volume handle in either a or b. When the given
4832 * input handle is non-zero, or when a and b have not been set before.
4835 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
/* Bail out for a zero handle or one already recorded in either slot. */
4837 if (!handle || handle == *a || handle == *b)
4846 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4847 * @ioc: per adapter object
4848 * @event_data: the event data payload
4849 * Context: interrupt time.
4851 * This routine will send target reset to volume, followed by target
4852 * resets to the PDs. This is called when a PD has been removed, or
4853 * volume has been deleted or removed. When the target reset is sent
4854 * to volume, the PD target resets need to be queued to start upon
4855 * completion of the volume target reset.
4858 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4859 Mpi2EventDataIrConfigChangeList_t *event_data)
4861 Mpi2EventIrConfigElement_t *element;
4863 u16 handle, volume_handle, a, b;
4864 struct _tr_list *delayed_tr;
/* WarpDrive hides its RAID internals; nothing to do there. */
4869 if (ioc->is_warpdrive)
4872 /* Volume Resets for Deleted or Removed */
4873 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4874 for (i = 0; i < event_data->NumElements; i++, element++) {
/* Foreign-config changes are not ours to act on. */
4875 if (le32_to_cpu(event_data->Flags) &
4876 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4878 if (element->ReasonCode ==
4879 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4880 element->ReasonCode ==
4881 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4882 volume_handle = le16_to_cpu(element->VolDevHandle);
4883 _scsih_set_volume_delete_flag(ioc, volume_handle);
4884 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4888 /* Volume Resets for UNHIDE events */
4889 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4890 for (i = 0; i < event_data->NumElements; i++, element++) {
4891 if (le32_to_cpu(event_data->Flags) &
4892 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4894 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4895 volume_handle = le16_to_cpu(element->VolDevHandle);
4896 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
/* Fire the collected volume target resets (at most two volumes). */
4901 _scsih_tm_tr_volume_send(ioc, a);
4903 _scsih_tm_tr_volume_send(ioc, b);
4905 /* PD target resets */
4906 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4907 for (i = 0; i < event_data->NumElements; i++, element++) {
4908 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4910 handle = le16_to_cpu(element->PhysDiskDevHandle);
4911 volume_handle = le16_to_cpu(element->VolDevHandle);
/* The PD is no longer a hidden RAID member. */
4912 clear_bit(handle, ioc->pd_handles);
4914 _scsih_tm_tr_send(ioc, handle);
4915 else if (volume_handle == a || volume_handle == b) {
/* The parent volume reset is in flight: queue the PD reset on
 * delayed_tr_list to run after the volume reset completes.
 * GFP_ATOMIC because we are in interrupt context.
 */
4916 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4917 BUG_ON(!delayed_tr);
4918 INIT_LIST_HEAD(&delayed_tr->list);
4919 delayed_tr->handle = handle;
4920 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4922 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4925 _scsih_tm_tr_send(ioc, handle);
4931 * _scsih_check_volume_delete_events - set delete flag for volumes
4932 * @ioc: per adapter object
4933 * @event_data: the event data payload
4934 * Context: interrupt time.
4936 * This will handle the case when the cable connected to entire volume is
4937 * pulled. We will take care of setting the deleted flag so normal IO will
4941 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4942 Mpi2EventDataIrVolume_t *event_data)
/* Only volume state-change events matter here. */
4946 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4948 state = le32_to_cpu(event_data->NewValue);
/* MISSING or FAILED: mark the volume deleted so queued I/O fails fast. */
4949 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4950 MPI2_RAID_VOL_STATE_FAILED)
4951 _scsih_set_volume_delete_flag(ioc,
4952 le16_to_cpu(event_data->VolDevHandle))
4956 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4957 * @ioc: per adapter object
4958 * @event_data: the temp threshold event data
4959 * Context: interrupt time.
4962 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4963 Mpi2EventDataTemperature_t *event_data)
/* Only log sensors the IOC actually reported at init time. */
4966 if (ioc->temp_sensors_count >= event_data->SensorNum) {
/* Status bits 0-3 map to the four threshold levels; each set bit
 * prints its level number, a cleared bit prints a blank.
 */
4967 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4968 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4969 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4970 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4971 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4972 event_data->SensorNum);
4973 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4974 event_data->CurrentTemperature);
/* On MPI2.5+ parts, peek at the doorbell: an overheat may have
 * already driven the IOC into FAULT or COREDUMP state.
 */
4975 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4976 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4977 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4978 MPI2_IOC_STATE_FAULT) {
4979 mpt3sas_print_fault_code(ioc,
4980 doorbell & MPI2_DOORBELL_DATA_MASK);
4981 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4982 MPI2_IOC_STATE_COREDUMP) {
4983 mpt3sas_print_coredump_info(ioc,
4984 doorbell & MPI2_DOORBELL_DATA_MASK);
/* _scsih_set_satl_pending - serialize ATA passthrough (SATL) commands.
 * Firmware cannot handle overlapping ATA_12/ATA_16 passthrough commands,
 * so a single pending bit per device gates them. Non-ATA commands are
 * never blocked. When @pending is true the return value is the previous
 * bit state (non-zero => another SATL command is already outstanding,
 * caller must retry later); when false the bit is cleared.
 */
4990 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4992 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4994 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4998 return test_and_set_bit(0, &priv->ata_command_pending);
5000 clear_bit(0, &priv->ata_command_pending);
5005 * _scsih_flush_running_cmds - completing outstanding commands.
5006 * @ioc: per adapter object
5008 * The flushing out of all pending scmd commands following host reset,
5009 * where all IO is dropped to the floor.
5012 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
5014 struct scsi_cmnd *scmd;
5015 struct scsiio_tracker *st;
/* Walk every SCSI-IO smid slot; slots without an active scmd are
 * skipped (lookup returns NULL for those).
 */
5019 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
5020 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5024 _scsih_set_satl_pending(scmd, false);
5025 st = scsi_cmd_priv(scmd);
5026 mpt3sas_base_clear_st(ioc, st);
5027 scsi_dma_unmap(scmd);
/* If the adapter is going away report DID_NO_CONNECT so the
 * midlayer does not retry; otherwise DID_RESET lets it requeue.
 */
5028 if (ioc->pci_error_recovery || ioc->remove_host)
5029 scmd->result = DID_NO_CONNECT << 16;
5031 scmd->result = DID_RESET << 16;
5032 scmd->scsi_done(scmd);
5034 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
5038 * _scsih_setup_eedp - setup MPI request for EEDP transfer
5039 * @ioc: per adapter object
5040 * @scmd: pointer to scsi command object
5041 * @mpi_request: pointer to the SCSI_IO request message frame
5043 * Supporting T10 DIF protection types 1, 2 and 3 (type 0 is a no-op).
5046 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5047 Mpi25SCSIIORequest_t *mpi_request)
5050 unsigned char prot_op = scsi_get_prot_op(scmd);
5051 unsigned char prot_type = scsi_get_prot_type(scmd);
5052 Mpi25SCSIIORequest_t *mpi_request_3v =
5053 (Mpi25SCSIIORequest_t *)mpi_request;
/* No protection requested: leave the frame's EEDP fields untouched. */
5055 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
/* READ_STRIP: controller verifies and removes PI on the way in.
 * WRITE_INSERT: controller generates and appends PI on the way out.
 */
5058 if (prot_op == SCSI_PROT_READ_STRIP)
5059 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5060 else if (prot_op == SCSI_PROT_WRITE_INSERT)
5061 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5065 switch (prot_type) {
5066 case SCSI_PROT_DIF_TYPE1:
5067 case SCSI_PROT_DIF_TYPE2:
5070 * enable ref/guard checking
5071 * auto increment ref tag
5073 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
5074 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
5075 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
/* Reference tag is the big-endian LBA-derived tag of this request. */
5076 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5077 cpu_to_be32(t10_pi_ref_tag(scmd->request));
5080 case SCSI_PROT_DIF_TYPE3:
5083 * enable guard checking
5085 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5090 mpi_request_3v->EEDPBlockSize =
5091 cpu_to_le16(scmd->device->sector_size);
/* Gen3.5 IOCs additionally disable application-tag checking. */
5093 if (ioc->is_gen35_ioc)
5094 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5095 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5099 * _scsih_eedp_error_handling - return sense code for EEDP errors
5100 * @scmd: pointer to scsi command object
5101 * @ioc_status: ioc status
5104 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
/* Map each EEDP (T10 PI) failure to its distinct ASCQ value; the elided
 * case bodies each pick an ascq before falling to the sense build below.
 */
5108 switch (ioc_status) {
5109 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5112 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5115 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
/* ILLEGAL_REQUEST / ASC 0x10 is the standard PI-error sense key/code. */
5122 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5123 set_host_byte(scmd, DID_ABORT);
5127 * scsih_qcmd - main scsi request entry point
5128 * @shost: SCSI host pointer
5129 * @scmd: pointer to scsi command object
5131 * The callback index is set inside `ioc->scsi_io_cb_idx`.
5133 * Return: 0 on success. If there's a failure, return either:
5134 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5135 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5138 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5140 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5141 struct MPT3SAS_DEVICE *sas_device_priv_data;
5142 struct MPT3SAS_TARGET *sas_target_priv_data;
5143 struct _raid_device *raid_device;
5144 struct request *rq = scmd->request;
5146 Mpi25SCSIIORequest_t *mpi_request;
5147 struct _pcie_device *pcie_device = NULL;
5152 if (ioc->logging_level & MPT_DEBUG_SCSI)
5153 scsi_print_command(scmd);
/* No per-device private data means the device is gone: complete with
 * DID_NO_CONNECT instead of queueing.
 */
5155 sas_device_priv_data = scmd->device->hostdata;
5156 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5157 scmd->result = DID_NO_CONNECT << 16;
5158 scmd->scsi_done(scmd);
5162 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5163 scmd->result = DID_NO_CONNECT << 16;
5164 scmd->scsi_done(scmd);
5168 sas_target_priv_data = sas_device_priv_data->sas_target;
5170 /* invalid device handle */
5171 handle = sas_target_priv_data->handle;
5172 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5173 scmd->result = DID_NO_CONNECT << 16;
5174 scmd->scsi_done(scmd);
5179 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5180 /* host recovery or link resets sent via IOCTLs */
5181 return SCSI_MLQUEUE_HOST_BUSY;
5182 } else if (sas_target_priv_data->deleted) {
5183 /* device has been deleted */
5184 scmd->result = DID_NO_CONNECT << 16;
5185 scmd->scsi_done(scmd);
5187 } else if (sas_target_priv_data->tm_busy ||
5188 sas_device_priv_data->block) {
5189 /* device busy with task management */
5190 return SCSI_MLQUEUE_DEVICE_BUSY;
5194 * Bug work around for firmware SATL handling. The loop
5195 * is based on atomic operations and ensures consistency
5196 * since we're lockless at this point
5199 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5200 return SCSI_MLQUEUE_DEVICE_BUSY;
5201 } while (_scsih_set_satl_pending(scmd, true));
/* Translate the data direction into the MPI control field. */
5203 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5204 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5205 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5206 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5208 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5211 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5212 /* NCQ Prio supported, make sure control indicated high priority */
5213 if (sas_device_priv_data->ncq_prio_enable) {
5214 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5215 if (class == IOPRIO_CLASS_RT)
5216 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5218 /* Make sure Device is not raid volume.
5219 * We do not expose raid functionality to upper layer for warpdrive.
5221 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5222 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5223 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5224 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
/* Grab a message frame; drop the SATL pending bit on failure so the
 * retried command is not deadlocked behind itself.
 */
5226 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5228 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5229 _scsih_set_satl_pending(scmd, false);
5232 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5233 memset(mpi_request, 0, ioc->request_sz);
5234 _scsih_setup_eedp(ioc, scmd, mpi_request);
/* 32-byte CDBs need the additional-CDB-length field set. */
5236 if (scmd->cmd_len == 32)
5237 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5238 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
/* RAID members go through the passthrough function instead. */
5239 if (sas_device_priv_data->sas_target->flags &
5240 MPT_TARGET_FLAGS_RAID_COMPONENT)
5241 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5243 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5244 mpi_request->DevHandle = cpu_to_le16(handle);
5245 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5246 mpi_request->Control = cpu_to_le32(mpi_control);
5247 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5248 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5249 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5250 mpi_request->SenseBufferLowAddress =
5251 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5252 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5253 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5255 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
/* Build the scatter-gather list; a zero-length command still needs a
 * zero-length SGE.
 */
5257 if (mpi_request->DataLength) {
5258 pcie_device = sas_target_priv_data->pcie_dev;
5259 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5260 mpt3sas_base_free_smid(ioc, smid);
5261 _scsih_set_satl_pending(scmd, false);
5265 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5267 raid_device = sas_target_priv_data->raid_device;
5268 if (raid_device && raid_device->direct_io_enabled)
5269 mpt3sas_setup_direct_io(ioc, scmd,
5270 raid_device, mpi_request);
/* Post the request: fast-path where supported, otherwise the normal
 * SCSI-IO queue; non-SCSI-IO functions use the default queue.
 */
5272 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5273 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5274 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5275 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5276 ioc->put_smid_fast_path(ioc, smid, handle);
5278 ioc->put_smid_scsi_io(ioc, smid,
5279 le16_to_cpu(mpi_request->DevHandle));
5281 ioc->put_smid_default(ioc, smid);
5285 return SCSI_MLQUEUE_HOST_BUSY;
5289 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5290 * @sense_buffer: sense data returned by target
5291 * @data: normalized skey/asc/ascq
5294 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
/* Response codes 0x72/0x73 are descriptor format (key/asc/ascq live in
 * bytes 1-3); older fixed format keeps them in bytes 2, 12 and 13.
 */
5296 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5297 /* descriptor format */
5298 data->skey = sense_buffer[1] & 0x0F;
5299 data->asc = sense_buffer[2];
5300 data->ascq = sense_buffer[3];
5303 data->skey = sense_buffer[2] & 0x0F;
5304 data->asc = sense_buffer[12];
5305 data->ascq = sense_buffer[13];
5310 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
5311 * @ioc: per adapter object
5312 * @scmd: pointer to scsi command object
5313 * @mpi_reply: reply mf payload returned from firmware
5316 * scsi_status - SCSI Status code returned from target device
5317 * scsi_state - state info associated with SCSI_IO determined by ioc
5318 * ioc_status - ioc supplied status info
5321 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5322 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5326 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5327 MPI2_IOCSTATUS_MASK;
5328 u8 scsi_state = mpi_reply->SCSIState;
5329 u8 scsi_status = mpi_reply->SCSIStatus;
5330 char *desc_ioc_state = NULL;
5331 char *desc_scsi_status = NULL;
5332 char *desc_scsi_state = ioc->tmp_string;
5333 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5334 struct _sas_device *sas_device = NULL;
5335 struct _pcie_device *pcie_device = NULL;
5336 struct scsi_target *starget = scmd->device->sdev_target;
5337 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5338 char *device_str = NULL;
/* WarpDrive hides its RAID internals, so label the target accordingly. */
5342 if (ioc->hide_ir_msg)
5343 device_str = "WarpDrive";
5345 device_str = "volume";
/* NOTE(review): 0x31170000 appears to be a loginfo value that is
 * deliberately not reported — confirm against the loginfo decoding.
 */
5347 if (log_info == 0x31170000)
/* Translate the IOC status into a human-readable string. */
5350 switch (ioc_status) {
5351 case MPI2_IOCSTATUS_SUCCESS:
5352 desc_ioc_state = "success";
5354 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5355 desc_ioc_state = "invalid function";
5357 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5358 desc_ioc_state = "scsi recovered error";
5360 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5361 desc_ioc_state = "scsi invalid dev handle";
5363 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5364 desc_ioc_state = "scsi device not there";
5366 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5367 desc_ioc_state = "scsi data overrun";
5369 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5370 desc_ioc_state = "scsi data underrun";
5372 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5373 desc_ioc_state = "scsi io data error";
5375 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5376 desc_ioc_state = "scsi protocol error";
5378 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5379 desc_ioc_state = "scsi task terminated";
5381 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5382 desc_ioc_state = "scsi residual mismatch";
5384 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5385 desc_ioc_state = "scsi task mgmt failed";
5387 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5388 desc_ioc_state = "scsi ioc terminated";
5390 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5391 desc_ioc_state = "scsi ext terminated";
5393 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5394 desc_ioc_state = "eedp guard error";
5396 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5397 desc_ioc_state = "eedp ref tag error";
5399 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5400 desc_ioc_state = "eedp app tag error";
5402 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5403 desc_ioc_state = "insufficient power";
5406 desc_ioc_state = "unknown";
/* Translate the SCSI status byte into a human-readable string. */
5410 switch (scsi_status) {
5411 case MPI2_SCSI_STATUS_GOOD:
5412 desc_scsi_status = "good";
5414 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5415 desc_scsi_status = "check condition";
5417 case MPI2_SCSI_STATUS_CONDITION_MET:
5418 desc_scsi_status = "condition met";
5420 case MPI2_SCSI_STATUS_BUSY:
5421 desc_scsi_status = "busy";
5423 case MPI2_SCSI_STATUS_INTERMEDIATE:
5424 desc_scsi_status = "intermediate";
5426 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5427 desc_scsi_status = "intermediate condmet";
5429 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5430 desc_scsi_status = "reservation conflict";
5432 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5433 desc_scsi_status = "command terminated";
5435 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5436 desc_scsi_status = "task set full";
5438 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5439 desc_scsi_status = "aca active";
5441 case MPI2_SCSI_STATUS_TASK_ABORTED:
5442 desc_scsi_status = "task aborted";
5445 desc_scsi_status = "unknown";
/* Build the scsi_state description by appending one token per set bit
 * into ioc->tmp_string.
 */
5449 desc_scsi_state[0] = '\0';
5451 desc_scsi_state = " ";
5452 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5453 strcat(desc_scsi_state, "response info ");
5454 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5455 strcat(desc_scsi_state, "state terminated ");
5456 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5457 strcat(desc_scsi_state, "no status ");
5458 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5459 strcat(desc_scsi_state, "autosense failed ");
5460 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5461 strcat(desc_scsi_state, "autosense valid ");
5463 scsi_print_command(scmd);
/* Identify the target: RAID volume, NVMe (PCIe) device, or SAS device,
 * each with its own identifying log line(s).
 */
5465 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5466 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5467 device_str, (u64)priv_target->sas_address);
5468 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5469 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5471 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5472 (u64)pcie_device->wwid, pcie_device->port_num);
5473 if (pcie_device->enclosure_handle != 0)
5474 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5475 (u64)pcie_device->enclosure_logical_id,
5477 if (pcie_device->connector_name[0])
5478 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5479 pcie_device->enclosure_level,
5480 pcie_device->connector_name);
5481 pcie_device_put(pcie_device);
5484 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5486 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5487 (u64)sas_device->sas_address, sas_device->phy);
5489 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5492 sas_device_put(sas_device);
/* Dump the decoded reply: handle, statuses, transfer counts, result. */
5496 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5497 le16_to_cpu(mpi_reply->DevHandle),
5498 desc_ioc_state, ioc_status, smid);
5499 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5500 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5501 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5502 le16_to_cpu(mpi_reply->TaskTag),
5503 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5504 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5505 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5507 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5508 struct sense_info data;
5509 _scsih_normalize_sense(scmd->sense_buffer, &data);
5510 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5511 data.skey, data.asc, data.ascq,
5512 le32_to_cpu(mpi_reply->SenseCount));
5514 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5515 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5516 response_bytes = (u8 *)&response_info;
5517 _scsih_response_code(ioc, response_bytes[0]);
5522 * _scsih_turn_on_pfa_led - illuminate PFA LED
5523 * @ioc: per adapter object
5524 * @handle: device handle
5528 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5530 Mpi2SepReply_t mpi_reply;
5531 Mpi2SepRequest_t mpi_request;
5532 struct _sas_device *sas_device;
/* Takes a reference on the sas_device; released at the bottom. */
5534 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
/* Build a SEP write-status request addressed by device handle to set
 * the predicted-fault slot status (lights the PFA LED).
 */
5538 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5539 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5540 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5541 mpi_request.SlotStatus =
5542 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5543 mpi_request.DevHandle = cpu_to_le16(handle);
5544 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5545 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5546 &mpi_request)) != 0) {
5547 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5548 __FILE__, __LINE__, __func__);
/* Remember the LED state so it can be turned off on device removal. */
5551 sas_device->pfa_led_on = 1;
5553 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5555 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5556 le16_to_cpu(mpi_reply.IOCStatus),
5557 le32_to_cpu(mpi_reply.IOCLogInfo)));
5561 sas_device_put(sas_device);
5565 * _scsih_turn_off_pfa_led - turn off Fault LED
5566 * @ioc: per adapter object
5567 * @sas_device: sas device whose PFA LED has to turned off
5571 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5572 struct _sas_device *sas_device)
5574 Mpi2SepReply_t mpi_reply;
5575 Mpi2SepRequest_t mpi_request;
/* Unlike the turn-on path, the device handle may already be invalid
 * (device removed), so address the SEP request by enclosure + slot
 * and clear the slot status to extinguish the LED.
 */
5577 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5578 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5579 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5580 mpi_request.SlotStatus = 0;
5581 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5582 mpi_request.DevHandle = 0;
5583 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5584 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5585 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5586 &mpi_request)) != 0) {
5587 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5588 __FILE__, __LINE__, __func__);
5592 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5594 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5595 le16_to_cpu(mpi_reply.IOCStatus),
5596 le32_to_cpu(mpi_reply.IOCLogInfo)));
5602 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5603 * @ioc: per adapter object
5604 * @handle: device handle
5605 * Context: interrupt.
5608 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5610 struct fw_event_work *fw_event;
/* SEP requests block, so defer the LED update to the firmware-event
 * worker instead of issuing it from interrupt context.
 */
5612 fw_event = alloc_fw_event_work(0)
5615 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5616 fw_event->device_handle = handle;
5617 fw_event->ioc = ioc;
5618 _scsih_fw_event_add(ioc, fw_event);
/* Drop our reference; the event queue holds its own. */
5619 fw_event_work_put(fw_event);
5623 * _scsih_smart_predicted_fault - process smart errors
5624 * @ioc: per adapter object
5625 * @handle: device handle
5626 * Context: interrupt.
5629 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5631 struct scsi_target *starget;
5632 struct MPT3SAS_TARGET *sas_target_priv_data;
5633 Mpi2EventNotificationReply_t *event_reply;
5634 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5635 struct _sas_device *sas_device;
5637 unsigned long flags;
5639 /* only handle non-raid devices */
5640 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5641 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5645 starget = sas_device->starget;
5646 sas_target_priv_data = starget->hostdata;
/* RAID members and volumes are skipped — only bare SAS devices. */
5648 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5649 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5652 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5654 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* NOTE(review): the PFA LED is only driven on IBM-branded adapters —
 * presumably an OEM requirement; confirm before extending.
 */
5656 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5657 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5659 /* insert into event log */
/* Synthesize a SAS device-status-change event (SMART data, ASC 0x5D)
 * so user space sees the predicted fault in the driver's event log.
 * GFP_ATOMIC because we are in interrupt context.
 */
5660 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5661 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5662 event_reply = kzalloc(sz, GFP_ATOMIC);
5664 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5665 __FILE__, __LINE__, __func__);
5669 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5670 event_reply->Event =
5671 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5672 event_reply->MsgLength = sz/4;
5673 event_reply->EventDataLength =
5674 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5675 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5676 event_reply->EventData;
5677 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5678 event_data->ASC = 0x5D;
5679 event_data->DevHandle = cpu_to_le16(handle);
5680 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5681 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5685 sas_device_put(sas_device);
5689 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5694 * _scsih_io_done - scsi request callback
5695 * @ioc: per adapter object
5696 * @smid: system request message index
5697 * @msix_index: MSIX table index supplied by the OS
5698 * @reply: reply message frame(lower 32bit addr)
5700 * Callback handler when using _scsih_qcmd.
5702 * Return: 1 meaning mf should be freed from _base_interrupt
5703 * 0 means the mf is freed from this function.
/*
 * _scsih_io_done - completion handler for a SCSI_IO request.
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Maps the firmware reply (IOCStatus / SCSIStatus / SCSIState) onto
 * scmd->result, copies autosense data when valid, then unmaps DMA,
 * frees the smid and completes the command back to the mid layer.
 * NOTE(review): interior lines are elided in this extraction; the
 * comments below annotate only the visible statements.
 */
5706 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5708 Mpi25SCSIIORequest_t *mpi_request;
5709 Mpi2SCSIIOReply_t *mpi_reply;
5710 struct scsi_cmnd *scmd;
5711 struct scsiio_tracker *st;
5717 struct MPT3SAS_DEVICE *sas_device_priv_data;
5718 u32 response_code = 0;
5720 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5722 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5726 _scsih_set_satl_pending(scmd, false);
5728 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
/* No reply frame means the firmware completed the I/O without error. */
5730 if (mpi_reply == NULL) {
5731 scmd->result = DID_OK << 16;
/* Target vanished (or was deleted) while the command was in flight. */
5735 sas_device_priv_data = scmd->device->hostdata;
5736 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5737 sas_device_priv_data->sas_target->deleted) {
5738 scmd->result = DID_NO_CONNECT << 16;
5741 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5744 * WARPDRIVE: If direct_io is set then it is directIO,
5745 * the failed direct I/O should be redirected to volume
5747 st = scsi_cmd_priv(scmd);
5748 if (st->direct_io &&
5749 ((ioc_status & MPI2_IOCSTATUS_MASK)
5750 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
/* Re-issue the failed direct I/O through the volume's DevHandle. */
5753 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5754 mpi_request->DevHandle =
5755 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5756 ioc->put_smid_scsi_io(ioc, smid,
5757 sas_device_priv_data->sas_target->handle);
5760 /* turning off TLR */
5761 scsi_state = mpi_reply->SCSIState;
5762 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5764 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
/* One-shot snoop: disable TLR on the first INVALID_FRAME response. */
5765 if (!sas_device_priv_data->tlr_snoop_check) {
5766 sas_device_priv_data->tlr_snoop_check++;
5767 if ((!ioc->is_warpdrive &&
5768 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5769 !scsih_is_nvme(&scmd->device->sdev_gendev))
5770 && sas_is_tlr_enabled(scmd->device) &&
5771 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5772 sas_disable_tlr(scmd->device);
5773 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5777 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5778 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5779 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5780 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5783 ioc_status &= MPI2_IOCSTATUS_MASK;
5784 scsi_status = mpi_reply->SCSIStatus;
/*
 * A zero-byte underrun carrying a BUSY-class SCSI status is promoted
 * to SUCCESS so the SCSI status itself reaches the mid layer.
 */
5786 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5787 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5788 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5789 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5790 ioc_status = MPI2_IOCSTATUS_SUCCESS;
/* Copy autosense data and react to it (SMART trip, diag trigger). */
5793 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5794 struct sense_info data;
5795 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5797 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5798 le32_to_cpu(mpi_reply->SenseCount));
5799 memcpy(scmd->sense_buffer, sense_data, sz);
5800 _scsih_normalize_sense(scmd->sense_buffer, &data);
5801 /* failure prediction threshold exceeded */
5802 if (data.asc == 0x5D)
5803 _scsih_smart_predicted_fault(ioc,
5804 le16_to_cpu(mpi_reply->DevHandle));
5805 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5807 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5808 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5809 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5810 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5811 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
/* Translate IOCStatus into the host byte of scmd->result. */
5813 switch (ioc_status) {
5814 case MPI2_IOCSTATUS_BUSY:
5815 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5816 scmd->result = SAM_STAT_BUSY;
5819 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5820 scmd->result = DID_NO_CONNECT << 16;
5823 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5824 if (sas_device_priv_data->block) {
5825 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
/* Loginfo 0x31110630: soft-retry a few times, then give up on device. */
5828 if (log_info == 0x31110630) {
5829 if (scmd->retries > 2) {
5830 scmd->result = DID_NO_CONNECT << 16;
5831 scsi_device_set_state(scmd->device,
5834 scmd->result = DID_SOFT_ERROR << 16;
5835 scmd->device->expecting_cc_ua = 1;
5838 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5839 scmd->result = DID_RESET << 16;
5841 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5842 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5843 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5844 scmd->result = DID_RESET << 16;
5847 scmd->result = DID_SOFT_ERROR << 16;
5849 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5850 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5851 scmd->result = DID_RESET << 16;
5854 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5855 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5856 scmd->result = DID_SOFT_ERROR << 16;
5858 scmd->result = (DID_OK << 16) | scsi_status;
5861 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5862 scmd->result = (DID_OK << 16) | scsi_status;
5864 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5867 if (xfer_cnt < scmd->underflow) {
5868 if (scsi_status == SAM_STAT_BUSY)
5869 scmd->result = SAM_STAT_BUSY;
5871 scmd->result = DID_SOFT_ERROR << 16;
5872 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5873 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5874 scmd->result = DID_SOFT_ERROR << 16;
5875 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5876 scmd->result = DID_RESET << 16;
/* Zero-byte REPORT LUNS underrun: synthesize a CHECK CONDITION. */
5877 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5878 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5879 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5880 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
5885 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5886 scsi_set_resid(scmd, 0);
5888 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5889 case MPI2_IOCSTATUS_SUCCESS:
5890 scmd->result = (DID_OK << 16) | scsi_status;
5891 if (response_code ==
5892 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5893 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5894 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5895 scmd->result = DID_SOFT_ERROR << 16;
5896 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5897 scmd->result = DID_RESET << 16;
5900 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5901 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5902 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5903 _scsih_eedp_error_handling(scmd, ioc_status);
5906 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5907 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5908 case MPI2_IOCSTATUS_INVALID_SGL:
5909 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5910 case MPI2_IOCSTATUS_INVALID_FIELD:
5911 case MPI2_IOCSTATUS_INVALID_STATE:
5912 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5913 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5914 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5916 scmd->result = DID_SOFT_ERROR << 16;
5921 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5922 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
/* Common completion path: release DMA mapping and smid, finish scmd. */
5926 scsi_dma_unmap(scmd);
5927 mpt3sas_base_free_smid(ioc, smid);
5928 scmd->scsi_done(scmd);
5933 * _scsih_update_vphys_after_reset - update the Port's
5934 * vphys_list after reset
5935 * @ioc: per adapter object
5940 _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5944 Mpi2ConfigReply_t mpi_reply;
5945 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5946 u16 attached_handle;
5947 u64 attached_sas_addr;
5948 u8 found = 0, port_id;
5949 Mpi2SasPhyPage0_t phy_pg0;
5950 struct hba_port *port, *port_next, *mport;
5951 struct virtual_phy *vphy, *vphy_next;
5952 struct _sas_device *sas_device;
5955 * Mark all the vphys objects as dirty.
5957 list_for_each_entry_safe(port, port_next,
5958 &ioc->port_table_list, list) {
5959 if (!port->vphys_mask)
5961 list_for_each_entry_safe(vphy, vphy_next,
5962 &port->vphys_list, list) {
5963 vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5968 * Read SASIOUnitPage0 to get each HBA Phy's data.
5970 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
5971 (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
5972 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5973 if (!sas_iounit_pg0) {
5974 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5975 __FILE__, __LINE__, __func__);
5978 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5979 sas_iounit_pg0, sz)) != 0)
5981 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5982 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5985 * Loop over each HBA Phy.
5987 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5989 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
5991 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5992 MPI2_SAS_NEG_LINK_RATE_1_5)
5995 * Check whether Phy is connected to SEP device or not,
5996 * if it is SEP device then read the Phy's SASPHYPage0 data to
5997 * determine whether Phy is a virtual Phy or not. if it is
5998 * virtual phy then it is confirmed that the attached remote
5999 * device is a HBA's vSES device.
6002 sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6003 MPI2_SAS_DEVICE_INFO_SEP))
6006 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6008 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6009 __FILE__, __LINE__, __func__);
6013 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6014 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6017 * Get the vSES device's SAS Address.
6019 attached_handle = le16_to_cpu(
6020 sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6021 if (_scsih_get_sas_address(ioc, attached_handle,
6022 &attached_sas_addr) != 0) {
6023 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6024 __FILE__, __LINE__, __func__);
6029 port = port_next = NULL;
6031 * Loop over each virtual_phy object from
6032 * each port's vphys_list.
6034 list_for_each_entry_safe(port,
6035 port_next, &ioc->port_table_list, list) {
6036 if (!port->vphys_mask)
6038 list_for_each_entry_safe(vphy, vphy_next,
6039 &port->vphys_list, list) {
6041 * Continue with next virtual_phy object
6042 * if the object is not marked as dirty.
6044 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6048 * Continue with next virtual_phy object
6049 * if the object's SAS Address is not equal
6050 * to current Phy's vSES device SAS Address.
6052 if (vphy->sas_address != attached_sas_addr)
6055 * Enable current Phy number bit in object's
6058 if (!(vphy->phy_mask & (1 << i)))
6059 vphy->phy_mask = (1 << i)/* (byte-identical; see note below) */;
6061 * Get hba_port object from hba_port table
6062 * corresponding to current phy's Port ID.
6063 * if there is no hba_port object corresponding
6064 * to Phy's Port ID then create a new hba_port
6065 * object & add to hba_port table.
6067 port_id = sas_iounit_pg0->PhyData[i].Port;
6068 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6071 sizeof(struct hba_port), GFP_KERNEL);
6074 mport->port_id = port_id;
6076 "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6077 __func__, mport, mport->port_id);
6078 list_add_tail(&mport->list,
6079 &ioc->port_table_list);
6082 * If mport & port pointers are not pointing to
6083 * same hba_port object then it means that vSES
6084 * device's Port ID got changed after reset and
6085 * hence move current virtual_phy object from
6086 * port's vphys_list to mport's vphys_list.
6088 if (port != mport) {
6089 if (!mport->vphys_mask)
6091 &mport->vphys_list);
6092 mport->vphys_mask |= (1 << i);
6093 port->vphys_mask &= ~(1 << i);
6094 list_move(&vphy->list,
6095 &mport->vphys_list);
6096 sas_device = mpt3sas_get_sdev_by_addr(
6097 ioc, attached_sas_addr, port);
6099 sas_device->port = mport;
6102 * Earlier while updating the hba_port table,
6103 * it is determined that there is no other
6104 * direct attached device with mport's Port ID,
6105 * Hence mport was marked as dirty. Only vSES
6106 * device has this Port ID, so unmark the mport
6109 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6110 mport->sas_address = 0;
6111 mport->phy_mask = 0;
6113 ~HBA_PORT_FLAG_DIRTY_PORT;
6116 * Unmark current virtual_phy object as dirty.
6118 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
/*
 * NOTE(review): at line 6059 phy_mask is assigned (=) rather than
 * OR-ed (|=); presumably intentional since a vSES occupies a single
 * phy — confirm against upstream before changing.
 */
6127 kfree(sas_iounit_pg0);
6131 * _scsih_get_port_table_after_reset - Construct temporary port table
6132 * @ioc: per adapter object
6133 * @port_table: address where port table needs to be constructed
6135 * return number of HBA port entries available after reset.
6138 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6139 struct hba_port *port_table)
6143 Mpi2ConfigReply_t mpi_reply;
6144 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6145 u16 attached_handle;
6146 u64 attached_sas_addr;
6147 u8 found = 0, port_count = 0, port_id;
/* Read SASIOUnitPage0 so every HBA phy's link state can be examined. */
6149 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6150 * sizeof(Mpi2SasIOUnit0PhyData_t));
6151 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6152 if (!sas_iounit_pg0) {
6153 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6154 __FILE__, __LINE__, __func__);
6158 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6159 sas_iounit_pg0, sz)) != 0)
6161 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6162 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6164 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
/* Skip phys with no link (negotiated rate below 1.5G). */
6166 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6167 MPI2_SAS_NEG_LINK_RATE_1_5)
6170 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6171 if (_scsih_get_sas_address(
6172 ioc, attached_handle, &attached_sas_addr) != 0) {
6173 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6174 __FILE__, __LINE__, __func__);
/* Fold the phy into an existing entry with same port id + address. */
6178 for (j = 0; j < port_count; j++) {
6179 port_id = sas_iounit_pg0->PhyData[i].Port;
6180 if (port_table[j].port_id == port_id &&
6181 port_table[j].sas_address == attached_sas_addr) {
6182 port_table[j].phy_mask |= (1 << i);
/* No match found: start a new temporary port-table entry. */
6191 port_id = sas_iounit_pg0->PhyData[i].Port;
6192 port_table[port_count].port_id = port_id;
6193 port_table[port_count].phy_mask = (1 << i);
6194 port_table[port_count].sas_address = attached_sas_addr;
6198 kfree(sas_iounit_pg0);
/*
 * enum hba_port_matched_codes - how a temporary (post-reset) port entry
 * matched an entry in the existing HBA port table; listed from the
 * strongest match downward.
 * NOTE(review): some enumerators are elided in this extraction.
 */
6202 enum hba_port_matched_codes {
6204 MATCHED_WITH_ADDR_AND_PHYMASK,
6205 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6206 MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6211 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6212 * from HBA port table
6213 * @ioc: per adapter object
6214 * @port_entry: hba port entry from temporary port table which needs to be
6215 * searched for matched entry in the HBA port table
6216 * @matched_port_entry: save matched hba port entry here
6217 * @count: count of matched entries
6219 * return type of matched entry found.
6221 static enum hba_port_matched_codes
6222 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6223 struct hba_port *port_entry,
6224 struct hba_port **matched_port_entry, int *count)
6226 struct hba_port *port_table_entry, *matched_port = NULL;
6227 enum hba_port_matched_codes matched_code = NOT_MATCHED;
6229 *matched_port_entry = NULL;
/* Only dirty entries (not yet reclaimed after reset) are candidates. */
6231 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6232 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
/* Strongest match: same SAS address and identical phy mask. */
6235 if ((port_table_entry->sas_address == port_entry->sas_address)
6236 && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6237 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6238 matched_port = port_table_entry;
/* Same address, overlapping phy mask AND matching port id. */
6242 if ((port_table_entry->sas_address == port_entry->sas_address)
6243 && (port_table_entry->phy_mask & port_entry->phy_mask)
6244 && (port_table_entry->port_id == port_entry->port_id)) {
6245 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6246 matched_port = port_table_entry;
/* Same address with overlapping phy mask only. */
6250 if ((port_table_entry->sas_address == port_entry->sas_address)
6251 && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6253 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6255 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6256 matched_port = port_table_entry;
/* Weakest match: SAS address only; do not demote a stronger match. */
6260 if (port_table_entry->sas_address == port_entry->sas_address) {
6262 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6264 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6266 matched_code = MATCHED_WITH_ADDR;
6267 matched_port = port_table_entry;
6272 *matched_port_entry = matched_port;
6273 if (matched_code == MATCHED_WITH_ADDR)
6275 return matched_code;
6279 * _scsih_del_phy_part_of_anther_port - remove phy if it
6280 * is a part of another port
6281 *@ioc: per adapter object
6282 *@port_table: port table after reset
6283 *@index: hba port entry index
6284 *@port_count: number of ports available after host reset
6285 *@offset: HBA phy bit offset
6289 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6290 struct hba_port *port_table,
6291 int index, u8 port_count, int offset)
6293 struct _sas_node *sas_node = &ioc->sas_hba;
/* If any other port entry already owns this phy, detach it there. */
6296 for (i = 0; i < port_count; i++) {
6300 if (port_table[i].phy_mask & (1 << offset)) {
6301 mpt3sas_transport_del_phy_from_an_existing_port(
6302 ioc, sas_node, &sas_node->phy[offset]);
/* Otherwise claim the phy for the entry at @index. */
6308 port_table[index].phy_mask |= (1 << offset);
6312 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6314 *@ioc: per adapter object
6315 *@hba_port_entry: hba port table entry
6316 *@port_table: temporary port table
6317 *@index: hba port entry index
6318 *@port_count: number of ports available after host reset
6322 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6323 struct hba_port *hba_port_entry, struct hba_port *port_table,
6324 int index, int port_count)
6326 u32 phy_mask, offset = 0;
6327 struct _sas_node *sas_node = &ioc->sas_hba;
/* XOR isolates the phys that differ between old and new membership. */
6329 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6331 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6332 if (phy_mask & (1 << offset)) {
/* Phy no longer in the new mask: it may belong to another port now. */
6333 if (!(port_table[index].phy_mask & (1 << offset))) {
6334 _scsih_del_phy_part_of_anther_port(
6335 ioc, port_table, index, port_count,
/* Phy newly in the mask: re-home it onto this port. */
6339 if (sas_node->phy[offset].phy_belongs_to_port)
6340 mpt3sas_transport_del_phy_from_an_existing_port(
6341 ioc, sas_node, &sas_node->phy[offset]);
6342 mpt3sas_transport_add_phy_to_an_existing_port(
6343 ioc, sas_node, &sas_node->phy[offset],
6344 hba_port_entry->sas_address,
6351 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6352 * @ioc: per adapter object
6357 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6359 struct hba_port *port, *port_next;
6360 struct virtual_phy *vphy, *vphy_next;
6362 list_for_each_entry_safe(port, port_next,
6363 &ioc->port_table_list, list) {
6364 if (!port->vphys_mask)
6366 list_for_each_entry_safe(vphy, vphy_next,
6367 &port->vphys_list, list) {
/* Dirty vphys were not re-discovered after reset: drop them. */
6368 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6369 drsprintk(ioc, ioc_info(ioc,
6370 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6371 vphy, port->port_id,
6373 port->vphys_mask &= ~vphy->phy_mask;
6374 list_del(&vphy->list);
/* A port left with no vphys and no address is itself stale. */
6378 if (!port->vphys_mask && !port->sas_address)
6379 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6384 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6386 *@ioc: per adapter object
6390 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6392 struct hba_port *port, *port_next;
6394 list_for_each_entry_safe(port, port_next,
6395 &ioc->port_table_list, list) {
/* Keep entries that are clean or were freshly created after reset. */
6396 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6397 port->flags & HBA_PORT_FLAG_NEW_PORT)
6400 drsprintk(ioc, ioc_info(ioc,
6401 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6402 port, port->port_id, port->phy_mask));
6403 list_del(&port->list);
6409 * _scsih_sas_port_refresh - Update HBA port table after host reset
6410 * @ioc: per adapter object
6413 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6416 struct hba_port *port_table;
6417 struct hba_port *port_table_entry;
6418 struct hba_port *port_entry = NULL;
6419 int i, j, count = 0, lcount = 0;
6423 drsprintk(ioc, ioc_info(ioc,
6424 "updating ports for sas_host(0x%016llx)\n",
6425 (unsigned long long)ioc->sas_hba.sas_address));
/* Build a temporary port table describing the post-reset topology. */
6427 port_table = kcalloc(ioc->sas_hba.num_phys,
6428 sizeof(struct hba_port), GFP_KERNEL);
6432 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6436 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6437 for (j = 0; j < port_count; j++)
6438 drsprintk(ioc, ioc_info(ioc,
6439 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6440 port_table[j].port_id,
6441 port_table[j].phy_mask, port_table[j].sas_address));
/* Mark every existing entry dirty; matches below will clear the flag. */
6443 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6444 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6446 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6447 port_table_entry = NULL;
6448 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6449 drsprintk(ioc, ioc_info(ioc,
6450 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6451 port_table_entry->port_id,
6452 port_table_entry->phy_mask,
6453 port_table_entry->sas_address));
/* Reconcile each new entry against the existing port table. */
6456 for (j = 0; j < port_count; j++) {
6457 ret = _scsih_look_and_get_matched_port_entry(ioc,
6458 &port_table[j], &port_entry, &count);
6460 drsprintk(ioc, ioc_info(ioc,
6461 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6462 port_table[j].sas_address,
6463 port_table[j].port_id));
6468 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6469 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6470 _scsih_add_or_del_phys_from_existing_port(ioc,
6471 port_entry, port_table, j, port_count);
6473 case MATCHED_WITH_ADDR:
6474 sas_addr = port_table[j].sas_address;
/* Count how many new entries share this address (wide/split port). */
6475 for (i = 0; i < port_count; i++) {
6476 if (port_table[i].sas_address == sas_addr)
/* Ambiguous (multiple candidates on either side): leave untouched. */
6480 if (count > 1 || lcount > 1)
6483 _scsih_add_or_del_phys_from_existing_port(ioc,
6484 port_entry, port_table, j, port_count);
/* Adopt the new port id/phy mask and clear the dirty flag. */
6490 if (port_entry->port_id != port_table[j].port_id)
6491 port_entry->port_id = port_table[j].port_id;
6492 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6493 port_entry->phy_mask = port_table[j].phy_mask;
6496 port_table_entry = NULL;
6500 * _scsih_alloc_vphy - allocate virtual_phy object
6501 * @ioc: per adapter object
6502 * @port_id: Port ID number
6503 * @phy_num: HBA Phy number
6505 * Returns allocated virtual_phy object.
6507 static struct virtual_phy *
6508 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6510 struct virtual_phy *vphy;
6511 struct hba_port *port;
6513 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
/* Reuse an existing vphy for this phy; allocate only when absent. */
6517 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6519 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
/* First vphy on this port: initialize the list head lazily. */
6523 if (!port->vphys_mask)
6524 INIT_LIST_HEAD(&port->vphys_list);
6527 * Enable bit corresponding to HBA phy number on its
6528 * parent hba_port object's vphys_mask field.
6530 port->vphys_mask |= (1 << phy_num);
6531 vphy->phy_mask |= (1 << phy_num);
6533 list_add_tail(&vphy->list, &port->vphys_list);
6536 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6537 vphy, port->port_id, phy_num);
6543 * _scsih_sas_host_refresh - refreshing sas host object contents
6544 * @ioc: per adapter object
6547 * During port enable, fw will send topology events for every device. Its
6548 * possible that the handles may change from the previous setting, so this
6549 * code keeping handles updating if changed.
6552 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6557 Mpi2ConfigReply_t mpi_reply;
6558 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6559 u16 attached_handle;
6560 u8 link_rate, port_id;
6561 struct hba_port *port;
6562 Mpi2SasPhyPage0_t phy_pg0;
6565 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6566 (u64)ioc->sas_hba.sas_address));
6568 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6569 * sizeof(Mpi2SasIOUnit0PhyData_t));
6570 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6571 if (!sas_iounit_pg0) {
6572 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6573 __FILE__, __LINE__, __func__);
6577 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6578 sas_iounit_pg0, sz)) != 0)
6580 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6581 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6583 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6584 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
/* Controller DevHandle may have changed across the reset. */
6586 ioc->sas_hba.handle = le16_to_cpu(
6587 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6588 port_id = sas_iounit_pg0->PhyData[i].Port;
/* Create an hba_port entry for any port id seen for the first time. */
6589 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6590 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6594 port->port_id = port_id;
6596 "hba_port entry: %p, port: %d is added to hba_port list\n",
6597 port, port->port_id);
6598 if (ioc->shost_recovery)
6599 port->flags = HBA_PORT_FLAG_NEW_PORT;
6600 list_add_tail(&port->list, &ioc->port_table_list);
6603 * Check whether current Phy belongs to HBA vSES device or not.
6605 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6606 MPI2_SAS_DEVICE_INFO_SEP &&
6607 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
6608 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6611 "failure at %s:%d/%s()!\n",
6612 __FILE__, __LINE__, __func__);
6615 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6616 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6619 * Allocate a virtual_phy object for vSES device, if
6620 * this vSES device is hot added.
6622 if (!_scsih_alloc_vphy(ioc, port_id, i))
6624 ioc->sas_hba.phy[i].hba_vphy = 1;
6627 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6628 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
/* Clamp the rate so an attached device never reports below 1.5G. */
6630 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6631 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6632 ioc->sas_hba.phy[i].port =
6633 mpt3sas_get_port_by_id(ioc, port_id, 0);
6634 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6635 attached_handle, i, link_rate,
6636 ioc->sas_hba.phy[i].port);
6639 kfree(sas_iounit_pg0);
6643 * _scsih_sas_host_add - create sas host object
6644 * @ioc: per adapter object
6646 * Creating host side data object, stored in ioc->sas_hba
6649 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6652 Mpi2ConfigReply_t mpi_reply;
6653 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6654 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6655 Mpi2SasPhyPage0_t phy_pg0;
6656 Mpi2SasDevicePage0_t sas_device_pg0;
6657 Mpi2SasEnclosurePage0_t enclosure_pg0;
6660 u8 device_missing_delay;
6661 u8 num_phys, port_id;
6662 struct hba_port *port;
6664 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6666 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6667 __FILE__, __LINE__, __func__);
6670 ioc->sas_hba.phy = kcalloc(num_phys,
6671 sizeof(struct _sas_phy), GFP_KERNEL);
6672 if (!ioc->sas_hba.phy) {
6673 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6674 __FILE__, __LINE__, __func__);
6677 ioc->sas_hba.num_phys = num_phys;
6679 /* sas_iounit page 0 */
6680 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
6681 sizeof(Mpi2SasIOUnit0PhyData_t));
6682 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6683 if (!sas_iounit_pg0) {
6684 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6685 __FILE__, __LINE__, __func__);
6688 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6689 sas_iounit_pg0, sz))) {
6690 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6691 __FILE__, __LINE__, __func__);
6694 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6695 MPI2_IOCSTATUS_MASK;
6696 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6697 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6698 __FILE__, __LINE__, __func__);
6702 /* sas_iounit page 1 */
6703 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
6704 sizeof(Mpi2SasIOUnit1PhyData_t));
6705 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6706 if (!sas_iounit_pg1) {
6707 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6708 __FILE__, __LINE__, __func__);
6711 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6712 sas_iounit_pg1, sz))) {
6713 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6714 __FILE__, __LINE__, __func__);
6717 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6718 MPI2_IOCSTATUS_MASK;
6719 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6720 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6721 __FILE__, __LINE__, __func__);
/* Cache I/O and report-device missing delays from IOUnitPage1. */
6725 ioc->io_missing_delay =
6726 sas_iounit_pg1->IODeviceMissingDelay;
6727 device_missing_delay =
6728 sas_iounit_pg1->ReportDeviceMissingDelay;
/* UNIT_16 flag means the timeout field is in units of 16 seconds. */
6729 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6730 ioc->device_missing_delay = (device_missing_delay &
6731 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6733 ioc->device_missing_delay = device_missing_delay &
6734 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6736 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
/* Populate each host phy from its SASPHYPage0 data. */
6737 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6738 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6740 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6741 __FILE__, __LINE__, __func__);
6744 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6745 MPI2_IOCSTATUS_MASK;
6746 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6747 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6748 __FILE__, __LINE__, __func__);
6753 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6754 PhyData[0].ControllerDevHandle);
6756 port_id = sas_iounit_pg0->PhyData[i].Port;
/* Create an hba_port entry for any port id not seen before. */
6757 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6758 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6762 port->port_id = port_id;
6764 "hba_port entry: %p, port: %d is added to hba_port list\n",
6765 port, port->port_id);
6766 list_add_tail(&port->list,
6767 &ioc->port_table_list);
6771 * Check whether current Phy belongs to HBA vSES device or not.
6773 if ((le32_to_cpu(phy_pg0.PhyInfo) &
6774 MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6775 (phy_pg0.NegotiatedLinkRate >> 4) >=
6776 MPI2_SAS_NEG_LINK_RATE_1_5) {
6778 * Allocate a virtual_phy object for vSES device.
6780 if (!_scsih_alloc_vphy(ioc, port_id, i))
6782 ioc->sas_hba.phy[i].hba_vphy = 1;
6785 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6786 ioc->sas_hba.phy[i].phy_id = i;
6787 ioc->sas_hba.phy[i].port =
6788 mpt3sas_get_port_by_id(ioc, port_id, 0);
6789 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6790 phy_pg0, ioc->sas_hba.parent_dev);
/* Read the host's own SASDevicePage0 for address/enclosure info. */
6792 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6793 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6794 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6795 __FILE__, __LINE__, __func__);
6798 ioc->sas_hba.enclosure_handle =
6799 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6800 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6801 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6802 ioc->sas_hba.handle,
6803 (u64)ioc->sas_hba.sas_address,
6804 ioc->sas_hba.num_phys);
6806 if (ioc->sas_hba.enclosure_handle) {
6807 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6808 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6809 ioc->sas_hba.enclosure_handle)))
6810 ioc->sas_hba.enclosure_logical_id =
6811 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6815 kfree(sas_iounit_pg1);
6816 kfree(sas_iounit_pg0);
6820 * _scsih_expander_add - creating expander object
6821 * @ioc: per adapter object
6822 * @handle: expander handle
6824 * Creating expander object, stored in ioc->sas_expander_list.
6826 * Return: 0 for success, else error.
6829 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6831 struct _sas_node *sas_expander;
6832 struct _enclosure_node *enclosure_dev;
6833 Mpi2ConfigReply_t mpi_reply;
6834 Mpi2ExpanderPage0_t expander_pg0;
6835 Mpi2ExpanderPage1_t expander_pg1;
6838 u64 sas_address, sas_address_parent = 0;
6840 unsigned long flags;
6841 struct _sas_port *mpt3sas_port = NULL;
/* Do not add hardware while the host or PCI layer is recovering. */
6849 if (ioc->shost_recovery || ioc->pci_error_recovery)
6852 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6853 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6854 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6855 __FILE__, __LINE__, __func__);
6859 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6860 MPI2_IOCSTATUS_MASK;
6861 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6862 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6863 __FILE__, __LINE__, __func__);
6867 /* handle out of order topology events */
6868 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6869 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6871 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6872 __FILE__, __LINE__, __func__);
6876 port_id = expander_pg0.PhysicalPort;
/* If the parent is itself an expander, add it first (recursion). */
6877 if (sas_address_parent != ioc->sas_hba.sas_address) {
6878 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6879 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6881 mpt3sas_get_port_by_id(ioc, port_id, 0));
6882 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6883 if (!sas_expander) {
6884 rc = _scsih_expander_add(ioc, parent_handle);
/* Nothing to do if this expander is already known. */
6890 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6891 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6892 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6893 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6894 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6899 sas_expander = kzalloc(sizeof(struct _sas_node),
6901 if (!sas_expander) {
6902 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6903 __FILE__, __LINE__, __func__);
6907 sas_expander->handle = handle;
6908 sas_expander->num_phys = expander_pg0.NumPhys;
6909 sas_expander->sas_address_parent = sas_address_parent;
6910 sas_expander->sas_address = sas_address;
6911 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6912 if (!sas_expander->port) {
6913 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6914 __FILE__, __LINE__, __func__);
6919 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6920 handle, parent_handle,
6921 (u64)sas_expander->sas_address, sas_expander->num_phys);
6923 if (!sas_expander->num_phys) {
6927 sas_expander->phy = kcalloc(sas_expander->num_phys,
6928 sizeof(struct _sas_phy), GFP_KERNEL);
6929 if (!sas_expander->phy) {
6930 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6931 __FILE__, __LINE__, __func__);
/* Register the expander with the SAS transport layer. */
6936 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6937 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6938 sas_address_parent, sas_expander->port);
6939 if (!mpt3sas_port) {
6940 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6941 __FILE__, __LINE__, __func__);
6945 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6946 sas_expander->rphy = mpt3sas_port->rphy;
/* Populate each expander phy from ExpanderPage1. */
6948 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6949 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6950 &expander_pg1, i, handle))) {
6951 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6952 __FILE__, __LINE__, __func__);
6956 sas_expander->phy[i].handle = handle;
6957 sas_expander->phy[i].phy_id = i;
6958 sas_expander->phy[i].port =
6959 mpt3sas_get_port_by_id(ioc, port_id, 0);
6961 if ((mpt3sas_transport_add_expander_phy(ioc,
6962 &sas_expander->phy[i], expander_pg1,
6963 sas_expander->parent_dev))) {
6964 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6965 __FILE__, __LINE__, __func__);
6971 if (sas_expander->enclosure_handle) {
6973 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6974 sas_expander->enclosure_handle);
6976 sas_expander->enclosure_logical_id =
6977 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6980 _scsih_expander_node_add(ioc, sas_expander);
/* Error path: tear down the transport port and free the object. */
6986 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6987 sas_address_parent, sas_expander->port);
6988 kfree(sas_expander);
6993 * mpt3sas_expander_remove - removing expander object
6994 * @ioc: per adapter object
6995 * @sas_address: expander sas_address
6996 * @port: hba port entry
6999 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7000 struct hba_port *port)
7002 struct _sas_node *sas_expander;
7003 unsigned long flags;
/* Guard: skip removal while a host reset is in progress; the topology
 * is re-scanned after recovery anyway.
 */
7005 if (ioc->shost_recovery)
/* Look up the expander node by SAS address (and port) while holding
 * sas_node_lock; the actual teardown is done outside the lock.
 */
7011 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7012 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7014 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* Remove the expander node object from the driver's topology lists. */
7016 _scsih_expander_node_remove(ioc, sas_expander);
7020 * _scsih_done - internal SCSI_IO callback handler.
7021 * @ioc: per adapter object
7022 * @smid: system request message index
7023 * @msix_index: MSIX table index supplied by the OS
7024 * @reply: reply message frame(lower 32bit addr)
7026 * Callback handler when sending internal generated SCSI_IO.
7027 * The callback index passed is `ioc->scsih_cb_idx`
7029 * Return: 1 meaning mf should be freed from _base_interrupt
7030 * 0 means the mf is freed from this function.
7033 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7035 MPI2DefaultReply_t *mpi_reply;
/* Translate the 32-bit reply frame address into a virtual pointer. */
7037 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* Ignore completions that do not match the single outstanding internal
 * scsih command (slot unused, or smid mismatch).
 */
7038 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7040 if (ioc->scsih_cmds.smid != smid)
7042 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
/* MsgLength is in units of 32-bit dwords, hence the multiply by 4. */
7044 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7045 mpi_reply->MsgLength*4);
7046 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7048 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
/* Wake up the context that issued the internal command. */
7049 complete(&ioc->scsih_cmds.done);
7056 #define MPT3_MAX_LUNS (255)
7060 * _scsih_check_access_status - check access flags
7061 * @ioc: per adapter object
7062 * @sas_address: sas address
7063 * @handle: sas device handle
7064 * @access_status: errors returned during discovery of the device
7066 * Return: 0 for success, else failure
7069 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7070 u16 handle, u8 access_status)
/* Map the firmware-reported access_status to a printable description.
 * The first two codes (no errors / sata needs init) are not failures.
 */
7075 switch (access_status) {
7076 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7077 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7080 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7081 desc = "sata capability failed";
7083 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7084 desc = "sata affiliation conflict";
7086 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7087 desc = "route not addressable";
7089 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7090 desc = "smp error not addressable";
7092 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7093 desc = "device blocked";
/* All SATA initialization failure sub-codes (SIF_*) collapse to a
 * single description.
 */
7095 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7096 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7097 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7098 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7099 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7100 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7101 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7102 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7103 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7104 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7105 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7106 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7107 desc = "sata initialization failed";
/* Discovery reported a problem for this device; log it with the
 * identifying SAS address and handle.
 */
7117 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7118 desc, (u64)sas_address, handle);
7123 * _scsih_check_device - checking device responsiveness
7124 * @ioc: per adapter object
7125 * @parent_sas_address: sas address of parent expander or sas host
7126 * @handle: attached device handle
7127 * @phy_number: phy number
7128 * @link_rate: new link rate
7131 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7132 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7134 Mpi2ConfigReply_t mpi_reply;
7135 Mpi2SasDevicePage0_t sas_device_pg0;
7136 struct _sas_device *sas_device = NULL;
7137 struct _enclosure_node *enclosure_dev = NULL;
7139 unsigned long flags;
7141 struct scsi_target *starget;
7142 struct MPT3SAS_TARGET *sas_target_priv_data;
7144 struct hba_port *port;
/* Read SAS Device Page 0 for this handle; bail out if the config
 * request or its IOC status indicates failure.
 */
7146 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7147 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7150 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7151 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7154 /* wide port handling ~ we need only handle device once for the phy that
7155 * is matched in sas device page zero
7157 if (phy_number != sas_device_pg0.PhyNum)
7160 /* check if this is end device */
7161 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7162 if (!(_scsih_is_end_device(device_info)))
/* Look up the driver's sas_device object by address/port; the
 * sas_device_lock stays held across the validation below.
 */
7165 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7166 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7167 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7170 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
/* Firmware may have re-assigned the device handle (e.g. after reset);
 * resync our cached handle and enclosure info from page 0.
 */
7176 if (unlikely(sas_device->handle != handle)) {
7177 starget = sas_device->starget;
7178 sas_target_priv_data = starget->hostdata;
7179 starget_printk(KERN_INFO, starget,
7180 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7181 sas_device->handle, handle);
7182 sas_target_priv_data->handle = handle;
7183 sas_device->handle = handle;
7184 if (le16_to_cpu(sas_device_pg0.Flags) &
7185 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7186 sas_device->enclosure_level =
7187 sas_device_pg0.EnclosureLevel;
/* ConnectorName is a fixed 4-byte field; NUL-terminate explicitly. */
7188 memcpy(sas_device->connector_name,
7189 sas_device_pg0.ConnectorName, 4);
7190 sas_device->connector_name[4] = '\0';
7192 sas_device->enclosure_level = 0;
7193 sas_device->connector_name[0] = '\0';
/* Refresh enclosure handle / logical id / chassis slot from the
 * cached enclosure page, if one matches.
 */
7196 sas_device->enclosure_handle =
7197 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7198 sas_device->is_chassis_slot_valid = 0;
7199 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7200 sas_device->enclosure_handle);
7201 if (enclosure_dev) {
7202 sas_device->enclosure_logical_id =
7203 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7204 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7205 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7206 sas_device->is_chassis_slot_valid = 1;
7207 sas_device->chassis_slot =
7208 enclosure_dev->pg0.ChassisSlot;
7213 /* check if device is present */
7214 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7215 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7216 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7221 /* check if there were any issues with discovery */
7222 if (_scsih_check_access_status(ioc, sas_address, handle,
7223 sas_device_pg0.AccessStatus))
/* Device checks out: drop the lock and let I/O flow to it again. */
7226 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7227 _scsih_ublock_io_device(ioc, sas_address, port);
7230 sas_device_put(sas_device);
/* Error path: release the lock and the device reference taken above. */
7234 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7236 sas_device_put(sas_device);
7240 * _scsih_add_device - creating sas device object
7241 * @ioc: per adapter object
7242 * @handle: sas device handle
7243 * @phy_num: phy number end device attached to
7244 * @is_pd: is this hidden raid component
7246 * Creating end device object, stored in ioc->sas_device_list.
7248 * Return: 0 for success, non-zero for failure.
7251 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/* Fetch SAS Device Page 0 for the handle; both a failed config request
 * and a bad IOC status are treated as failures.
 */
7254 Mpi2ConfigReply_t mpi_reply;
7255 Mpi2SasDevicePage0_t sas_device_pg0;
7256 struct _sas_device *sas_device;
7257 struct _enclosure_node *enclosure_dev = NULL;
7263 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7264 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7265 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7266 __FILE__, __LINE__, __func__);
7270 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7271 MPI2_IOCSTATUS_MASK;
7272 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7273 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7274 __FILE__, __LINE__, __func__);
7278 /* check if this is end device */
7279 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7280 if (!(_scsih_is_end_device(device_info)))
/* Mark the handle as pending an OS-level device add; cleared again if
 * the device turns out to already exist.
 */
7282 set_bit(handle, ioc->pend_os_device_add);
7283 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7285 /* check if device is present */
7286 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7287 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7288 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7293 /* check if there were any issues with discovery */
7294 if (_scsih_check_access_status(ioc, sas_address, handle,
7295 sas_device_pg0.AccessStatus))
/* If a sas_device with this address/port already exists there is
 * nothing to add; drop the pending bit and the lookup reference.
 */
7298 port_id = sas_device_pg0.PhysicalPort;
7299 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7300 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7302 clear_bit(handle, ioc->pend_os_device_add);
7303 sas_device_put(sas_device);
/* Locate the cached enclosure object for this device, if any. */
7307 if (sas_device_pg0.EnclosureHandle) {
7309 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7310 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7311 if (enclosure_dev == NULL)
7312 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7313 sas_device_pg0.EnclosureHandle);
/* Allocate and populate the new sas_device object from page 0. */
7316 sas_device = kzalloc(sizeof(struct _sas_device),
7319 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7320 __FILE__, __LINE__, __func__);
7324 kref_init(&sas_device->refcount);
7325 sas_device->handle = handle;
/* Parent SAS address lookup failure is logged but not fatal here. */
7326 if (_scsih_get_sas_address(ioc,
7327 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7328 &sas_device->sas_address_parent) != 0)
7329 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7330 __FILE__, __LINE__, __func__);
7331 sas_device->enclosure_handle =
7332 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7333 if (sas_device->enclosure_handle != 0)
7335 le16_to_cpu(sas_device_pg0.Slot);
7336 sas_device->device_info = device_info;
7337 sas_device->sas_address = sas_address;
7338 sas_device->phy = sas_device_pg0.PhyNum;
7339 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7340 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7341 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7342 if (!sas_device->port) {
7343 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7344 __FILE__, __LINE__, __func__);
7348 if (le16_to_cpu(sas_device_pg0.Flags)
7349 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7350 sas_device->enclosure_level =
7351 sas_device_pg0.EnclosureLevel;
/* ConnectorName is a fixed 4-byte field; NUL-terminate explicitly. */
7352 memcpy(sas_device->connector_name,
7353 sas_device_pg0.ConnectorName, 4);
7354 sas_device->connector_name[4] = '\0';
7356 sas_device->enclosure_level = 0;
7357 sas_device->connector_name[0] = '\0';
7359 /* get enclosure_logical_id & chassis_slot*/
7360 sas_device->is_chassis_slot_valid = 0;
7361 if (enclosure_dev) {
7362 sas_device->enclosure_logical_id =
7363 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7364 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7365 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7366 sas_device->is_chassis_slot_valid = 1;
7367 sas_device->chassis_slot =
7368 enclosure_dev->pg0.ChassisSlot;
7372 /* get device name */
7373 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
/* During initial port enable the device goes on the init list and is
 * reported later; otherwise register it immediately.
 */
7375 if (ioc->wait_for_discovery_to_complete)
7376 _scsih_sas_device_init_add(ioc, sas_device);
7378 _scsih_sas_device_add(ioc, sas_device);
7381 sas_device_put(sas_device);
7386 * _scsih_remove_device - removing sas device object
7387 * @ioc: per adapter object
7388 * @sas_device: the sas_device object
7391 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7392 struct _sas_device *sas_device)
7394 struct MPT3SAS_TARGET *sas_target_priv_data;
/* IBM-branded adapters: turn off the predicted-fault-analysis LED that
 * may have been lit for this device before it goes away.
 */
7396 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7397 (sas_device->pfa_led_on)) {
7398 _scsih_turn_off_pfa_led(ioc, sas_device);
7399 sas_device->pfa_led_on = 0;
7403 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7405 sas_device->handle, (u64)sas_device->sas_address));
7407 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
/* Mark the target deleted and release any blocked I/O so outstanding
 * commands can complete/fail before the transport port is removed.
 */
7410 if (sas_device->starget && sas_device->starget->hostdata) {
7411 sas_target_priv_data = sas_device->starget->hostdata;
7412 sas_target_priv_data->deleted = 1;
7413 _scsih_ublock_io_device(ioc, sas_device->sas_address,
7415 sas_target_priv_data->handle =
7416 MPT3SAS_INVALID_DEVICE_HANDLE;
/* hide_drives: hidden RAID members were never exposed via the SAS
 * transport layer, so there is no port to remove.
 */
7419 if (!ioc->hide_drives)
7420 mpt3sas_transport_port_remove(ioc,
7421 sas_device->sas_address,
7422 sas_device->sas_address_parent,
7425 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7426 sas_device->handle, (u64)sas_device->sas_address);
7428 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7431 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7433 sas_device->handle, (u64)sas_device->sas_address));
7434 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7439 * _scsih_sas_topology_change_event_debug - debug for topology event
7440 * @ioc: per adapter object
7441 * @event_data: event data payload
7445 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7446 Mpi2EventDataSasTopologyChangeList_t *event_data)
7452 char *status_str = NULL;
7453 u8 link_rate, prev_link_rate;
/* Decode the expander-level status into a printable string. */
7455 switch (event_data->ExpStatus) {
7456 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7459 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7460 status_str = "remove";
7462 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7464 status_str = "responding";
7466 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7467 status_str = "remove delay";
7470 status_str = "unknown status";
7473 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7474 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7475 "start_phy(%02d), count(%d)\n",
7476 le16_to_cpu(event_data->ExpanderDevHandle),
7477 le16_to_cpu(event_data->EnclosureHandle),
7478 event_data->StartPhyNum, event_data->NumEntries);
/* Print one line per PHY entry: reason code plus old/new link rate. */
7479 for (i = 0; i < event_data->NumEntries; i++) {
7480 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7483 phy_number = event_data->StartPhyNum + i;
7484 reason_code = event_data->PHY[i].PhyStatus &
7485 MPI2_EVENT_SAS_TOPO_RC_MASK;
7486 switch (reason_code) {
7487 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7488 status_str = "target add";
7490 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7491 status_str = "target remove";
7493 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7494 status_str = "delay target remove";
7496 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7497 status_str = "link rate change";
7499 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7500 status_str = "target responding";
7503 status_str = "unknown";
/* LinkRate packs the new rate in the high nibble and the previous
 * rate in the low nibble.
 */
7506 link_rate = event_data->PHY[i].LinkRate >> 4;
7507 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7508 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7509 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7510 handle, status_str, link_rate, prev_link_rate);
7516 * _scsih_sas_topology_change_event - handle topology changes
7517 * @ioc: per adapter object
7518 * @fw_event: The fw_event_work object
7523 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7524 struct fw_event_work *fw_event)
7527 u16 parent_handle, handle;
7529 u8 phy_number, max_phys;
7530 struct _sas_node *sas_expander;
7532 unsigned long flags;
7533 u8 link_rate, prev_link_rate;
7534 struct hba_port *port;
7535 Mpi2EventDataSasTopologyChangeList_t *event_data =
7536 (Mpi2EventDataSasTopologyChangeList_t *)
7537 fw_event->event_data;
7539 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7540 _scsih_sas_topology_change_event_debug(ioc, event_data);
/* No topology processing while the host is resetting or going away. */
7542 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
/* Make sure the SAS host object exists and is up to date before
 * handling any child events.
 */
7545 if (!ioc->sas_hba.num_phys)
7546 _scsih_sas_host_add(ioc);
7548 _scsih_sas_host_refresh(ioc);
7550 if (fw_event->ignore) {
7551 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7555 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7556 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7558 /* handle expander add */
7559 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7560 if (_scsih_expander_add(ioc, parent_handle) != 0)
/* Determine the parent: either a known expander, or the HBA itself
 * when the parent handle falls inside the host's phy range.
 */
7563 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7564 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7567 sas_address = sas_expander->sas_address;
7568 max_phys = sas_expander->num_phys;
7569 port = sas_expander->port;
7570 } else if (parent_handle < ioc->sas_hba.num_phys) {
7571 sas_address = ioc->sas_hba.sas_address;
7572 max_phys = ioc->sas_hba.num_phys;
7574 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7577 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7579 /* handle siblings events */
7580 for (i = 0; i < event_data->NumEntries; i++) {
/* fw_event->ignore can become set while we iterate (a later event
 * superseded this one); re-check each pass.
 */
7581 if (fw_event->ignore) {
7583 ioc_info(ioc, "ignoring expander event\n"));
7586 if (ioc->remove_host || ioc->pci_error_recovery)
7588 phy_number = event_data->StartPhyNum + i;
7589 if (phy_number >= max_phys)
7591 reason_code = event_data->PHY[i].PhyStatus &
7592 MPI2_EVENT_SAS_TOPO_RC_MASK;
/* Vacant phys carry no device, except when the reason is a target
 * removal which must still be processed.
 */
7593 if ((event_data->PHY[i].PhyStatus &
7594 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7595 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7597 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
/* New rate in the high nibble, previous rate in the low nibble. */
7600 link_rate = event_data->PHY[i].LinkRate >> 4;
7601 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7602 switch (reason_code) {
7603 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7605 if (ioc->shost_recovery)
7608 if (link_rate == prev_link_rate)
7611 mpt3sas_transport_update_links(ioc, sas_address,
7612 handle, phy_number, link_rate, port);
/* Links below 1.5 Gbps are not usable; skip responsiveness check. */
7614 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7617 _scsih_check_device(ioc, sas_address, handle,
7618 phy_number, link_rate);
7620 if (!test_bit(handle, ioc->pend_os_device_add))
7625 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7627 if (ioc->shost_recovery)
7630 mpt3sas_transport_update_links(ioc, sas_address,
7631 handle, phy_number, link_rate, port);
7633 _scsih_add_device(ioc, handle, phy_number, 0);
7636 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7638 _scsih_device_remove_by_handle(ioc, handle);
7643 /* handle expander removal */
7644 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7646 mpt3sas_expander_remove(ioc, sas_address, port);
7652 * _scsih_sas_device_status_change_event_debug - debug for device event
7654 * @event_data: event data payload
7658 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7659 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7661 char *reason_str = NULL;
/* Decode the firmware reason code into a printable string. */
7663 switch (event_data->ReasonCode) {
7664 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7665 reason_str = "smart data";
7667 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7668 reason_str = "unsupported device discovered";
7670 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7671 reason_str = "internal device reset";
7673 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7674 reason_str = "internal task abort";
7676 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7677 reason_str = "internal task abort set";
7679 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7680 reason_str = "internal clear task set";
7682 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7683 reason_str = "internal query task";
7685 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7686 reason_str = "sata init failure";
7688 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7689 reason_str = "internal device reset complete";
7691 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7692 reason_str = "internal task abort complete";
7694 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7695 reason_str = "internal async notification";
7697 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7698 reason_str = "expander reduced functionality";
7700 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7701 reason_str = "expander reduced functionality complete";
7704 reason_str = "unknown reason";
7707 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7708 reason_str, le16_to_cpu(event_data->DevHandle),
7709 (u64)le64_to_cpu(event_data->SASAddress),
7710 le16_to_cpu(event_data->TaskTag));
/* SMART events additionally carry sense key qualifiers (ASC/ASCQ). */
7711 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7712 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7713 event_data->ASC, event_data->ASCQ);
7718 * _scsih_sas_device_status_change_event - handle device status change
7719 * @ioc: per adapter object
7720 * @event_data: The fw event
7724 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7725 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7727 struct MPT3SAS_TARGET *target_priv_data;
7728 struct _sas_device *sas_device;
7730 unsigned long flags;
7732 /* In MPI Revision K (0xC), the internal device reset complete was
7733 * implemented, so avoid setting tm_busy flag for older firmware.
7735 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
/* Only the internal-device-reset start/complete pair is of interest;
 * all other reason codes are ignored here.
 */
7738 if (event_data->ReasonCode !=
7739 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7740 event_data->ReasonCode !=
7741 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7744 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7745 sas_address = le64_to_cpu(event_data->SASAddress);
7746 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7748 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7750 if (!sas_device || !sas_device->starget)
7753 target_priv_data = sas_device->starget->hostdata;
7754 if (!target_priv_data)
/* Set tm_busy while the internal device reset is in flight and clear
 * it again on the completion event.
 */
7757 if (event_data->ReasonCode ==
7758 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7759 target_priv_data->tm_busy = 1;
7761 target_priv_data->tm_busy = 0;
7763 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7765 "%s tm_busy flag for handle(0x%04x)\n",
7766 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7767 target_priv_data->handle);
/* Drop the reference from the lookup, then release the lock. */
7771 sas_device_put(sas_device);
7773 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7778 * _scsih_check_pcie_access_status - check access flags
7779 * @ioc: per adapter object
7781 * @handle: sas device handle
7782 * @access_status: errors returned during discovery of the device
7784 * Return: 0 for success, else failure
7787 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7788 u16 handle, u8 access_status)
/* Map the PCIe device access_status to a printable description.
 * The first two codes are not errors.
 */
7793 switch (access_status) {
7794 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7795 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7798 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7799 desc = "PCIe device capability failed";
/* DEVICE_BLOCKED is special-cased: the device is only logged and kept
 * on the internal list rather than being treated as a plain failure.
 */
7801 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7802 desc = "PCIe device blocked";
7804 "Device with Access Status (%s): wwid(0x%016llx), "
7805 "handle(0x%04x)\n ll only be added to the internal list",
7806 desc, (u64)wwid, handle);
7809 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7810 desc = "PCIe device mem space access failed";
7812 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7813 desc = "PCIe device unsupported";
7815 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7816 desc = "PCIe device MSIx Required";
7818 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7819 desc = "PCIe device init fail max";
7821 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7822 desc = "PCIe device status unknown";
7824 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7825 desc = "nvme ready timeout";
7827 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7828 desc = "nvme device configuration unsupported";
7830 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7831 desc = "nvme identify failed";
7833 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7834 desc = "nvme qconfig failed";
7836 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7837 desc = "nvme qcreation failed";
7839 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7840 desc = "nvme eventcfg failed";
7842 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7843 desc = "nvme get feature stat failed";
7845 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7846 desc = "nvme idle timeout";
7848 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7849 desc = "nvme failure status";
/* Unrecognized codes are reported numerically. */
7852 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7853 access_status, (u64)wwid, handle);
7860 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7861 desc, (u64)wwid, handle);
7866 * _scsih_pcie_device_remove_from_sml - removing pcie device
7867 * from SML and free up associated memory
7868 * @ioc: per adapter object
7869 * @pcie_device: the pcie_device object
7872 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7873 struct _pcie_device *pcie_device)
7875 struct MPT3SAS_TARGET *sas_target_priv_data;
/* Entry trace: handle/wwid plus enclosure and connector details when
 * they are populated.
 */
7878 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7880 pcie_device->handle, (u64)pcie_device->wwid));
7881 if (pcie_device->enclosure_handle != 0)
7883 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7885 (u64)pcie_device->enclosure_logical_id,
7886 pcie_device->slot));
7887 if (pcie_device->connector_name[0] != '\0')
7889 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7891 pcie_device->enclosure_level,
7892 pcie_device->connector_name));
/* Mark the target deleted and unblock any held I/O so outstanding
 * commands can drain before the target is removed.
 */
7894 if (pcie_device->starget && pcie_device->starget->hostdata) {
7895 sas_target_priv_data = pcie_device->starget->hostdata;
7896 sas_target_priv_data->deleted = 1;
7897 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7898 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7901 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7902 pcie_device->handle, (u64)pcie_device->wwid);
7903 if (pcie_device->enclosure_handle != 0)
7904 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7905 (u64)pcie_device->enclosure_logical_id,
7907 if (pcie_device->connector_name[0] != '\0')
7908 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7909 pcie_device->enclosure_level,
7910 pcie_device->connector_name);
/* Blocked devices were never exposed to the SCSI midlayer, so only
 * remove the scsi_target for non-blocked ones.
 */
7912 if (pcie_device->starget && (pcie_device->access_status !=
7913 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7914 scsi_remove_target(&pcie_device->starget->dev);
7916 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7918 pcie_device->handle, (u64)pcie_device->wwid);
7919 if (pcie_device->enclosure_handle != 0)
7921 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7923 (u64)pcie_device->enclosure_logical_id,
7924 pcie_device->slot));
7925 if (pcie_device->connector_name[0] != '\0')
7927 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7929 pcie_device->enclosure_level,
7930 pcie_device->connector_name));
/* Free the cached serial number string owned by this object. */
7932 kfree(pcie_device->serial_number);
7937 * _scsih_pcie_check_device - checking device responsiveness
7938 * @ioc: per adapter object
7939 * @handle: attached device handle
7942 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7944 Mpi2ConfigReply_t mpi_reply;
7945 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7947 struct _pcie_device *pcie_device;
7949 unsigned long flags;
7950 struct scsi_target *starget;
7951 struct MPT3SAS_TARGET *sas_target_priv_data;
/* Read PCIe Device Page 0 for this handle; bail out if the config
 * request or its IOC status indicates failure.
 */
7954 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7955 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
7958 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7959 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7962 /* check if this is end device */
7963 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7964 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
/* Look up the driver's pcie_device object by WWID under
 * pcie_device_lock.
 */
7967 wwid = le64_to_cpu(pcie_device_pg0.WWID);
7968 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7969 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7972 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* Firmware may have re-assigned the device handle; resync the cached
 * handle, access status and enclosure info from page 0.
 */
7976 if (unlikely(pcie_device->handle != handle)) {
7977 starget = pcie_device->starget;
7978 sas_target_priv_data = starget->hostdata;
7979 pcie_device->access_status = pcie_device_pg0.AccessStatus;
7980 starget_printk(KERN_INFO, starget,
7981 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7982 pcie_device->handle, handle);
7983 sas_target_priv_data->handle = handle;
7984 pcie_device->handle = handle;
7986 if (le32_to_cpu(pcie_device_pg0.Flags) &
7987 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7988 pcie_device->enclosure_level =
7989 pcie_device_pg0.EnclosureLevel;
7990 memcpy(&pcie_device->connector_name[0],
7991 &pcie_device_pg0.ConnectorName[0], 4);
7993 pcie_device->enclosure_level = 0;
7994 pcie_device->connector_name[0] = '\0';
7998 /* check if device is present */
7999 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8000 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8001 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
8003 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8004 pcie_device_put(pcie_device);
8008 /* check if there were any issues with discovery */
8009 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8010 pcie_device_pg0.AccessStatus)) {
8011 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8012 pcie_device_put(pcie_device);
/* Device checks out: drop lock and reference, then release any
 * blocked I/O for it.
 */
8016 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8017 pcie_device_put(pcie_device);
8019 _scsih_ublock_io_device(ioc, wwid, NULL);
8025 * _scsih_pcie_add_device - creating pcie device object
8026 * @ioc: per adapter object
8027 * @handle: pcie device handle
8029 * Creating end device object, stored in ioc->pcie_device_list.
8031 * Return: 1 means queue the event later, 0 means complete the event
8034 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8036 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8037 Mpi26PCIeDevicePage2_t pcie_device_pg2;
8038 Mpi2ConfigReply_t mpi_reply;
8039 struct _pcie_device *pcie_device;
8040 struct _enclosure_node *enclosure_dev;
8044 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8045 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8046 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8047 __FILE__, __LINE__, __func__);
8050 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8051 MPI2_IOCSTATUS_MASK;
8052 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8053 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8054 __FILE__, __LINE__, __func__);
8058 set_bit(handle, ioc->pend_os_device_add);
8059 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8061 /* check if device is present */
8062 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8063 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8064 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8069 /* check if there were any issues with discovery */
8070 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8071 pcie_device_pg0.AccessStatus))
8074 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8075 (pcie_device_pg0.DeviceInfo))))
8078 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8080 clear_bit(handle, ioc->pend_os_device_add);
8081 pcie_device_put(pcie_device);
8085 /* PCIe Device Page 2 contains read-only information about a
8086 * specific NVMe device; therefore, this page is only
8087 * valid for NVMe devices and skip for pcie devices of type scsi.
8089 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8090 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8091 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8092 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8095 "failure at %s:%d/%s()!\n", __FILE__,
8096 __LINE__, __func__);
8100 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8101 MPI2_IOCSTATUS_MASK;
8102 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8104 "failure at %s:%d/%s()!\n", __FILE__,
8105 __LINE__, __func__);
8110 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8112 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8113 __FILE__, __LINE__, __func__);
8117 kref_init(&pcie_device->refcount);
8118 pcie_device->id = ioc->pcie_target_id++;
8119 pcie_device->channel = PCIE_CHANNEL;
8120 pcie_device->handle = handle;
8121 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8122 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8123 pcie_device->wwid = wwid;
8124 pcie_device->port_num = pcie_device_pg0.PortNum;
8125 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8126 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8128 pcie_device->enclosure_handle =
8129 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8130 if (pcie_device->enclosure_handle != 0)
8131 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8133 if (le32_to_cpu(pcie_device_pg0.Flags) &
8134 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8135 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8136 memcpy(&pcie_device->connector_name[0],
8137 &pcie_device_pg0.ConnectorName[0], 4);
8139 pcie_device->enclosure_level = 0;
8140 pcie_device->connector_name[0] = '\0';
8143 /* get enclosure_logical_id */
8144 if (pcie_device->enclosure_handle) {
8146 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8147 pcie_device->enclosure_handle);
8149 pcie_device->enclosure_logical_id =
8150 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8152 /* TODO -- Add device name once FW supports it */
8153 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8154 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8155 pcie_device->nvme_mdts =
8156 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8157 pcie_device->shutdown_latency =
8158 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8160 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8161 * if drive's RTD3 Entry Latency is greater then IOC's
8162 * max_shutdown_latency.
8164 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8165 ioc->max_shutdown_latency =
8166 pcie_device->shutdown_latency;
8167 if (pcie_device_pg2.ControllerResetTO)
8168 pcie_device->reset_timeout =
8169 pcie_device_pg2.ControllerResetTO;
8171 pcie_device->reset_timeout = 30;
8173 pcie_device->reset_timeout = 30;
8175 if (ioc->wait_for_discovery_to_complete)
8176 _scsih_pcie_device_init_add(ioc, pcie_device);
8178 _scsih_pcie_device_add(ioc, pcie_device);
8180 pcie_device_put(pcie_device);
8185 * _scsih_pcie_topology_change_event_debug - debug for topology
8187 * @ioc: per adapter object
8188 * @event_data: event data payload
/* NOTE(review): gaps in the embedded line numbering indicate elided source
 * lines (break statements, braces, declarations); verify this listing
 * against upstream mpt3sas_scsih.c before modifying. */
/* Decodes a PCIe topology-change event into human-readable log lines:
 * one summary line for the switch, then one line per port entry. */
8192 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8193 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8199 char *status_str = NULL;
8200 u8 link_rate, prev_link_rate;
/* Translate the switch-level status code into a label. */
8202 switch (event_data->SwitchStatus) {
8203 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8206 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8207 status_str = "remove";
8209 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8211 status_str = "responding";
8213 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8214 status_str = "remove delay";
8217 status_str = "unknown status";
8220 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8221 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8222 "start_port(%02d), count(%d)\n",
8223 le16_to_cpu(event_data->SwitchDevHandle),
8224 le16_to_cpu(event_data->EnclosureHandle),
8225 event_data->StartPortNum, event_data->NumEntries);
/* Per-port entries: decode reason code and current/previous link rate. */
8226 for (i = 0; i < event_data->NumEntries; i++) {
8228 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8231 port_number = event_data->StartPortNum + i;
8232 reason_code = event_data->PortEntry[i].PortStatus;
8233 switch (reason_code) {
8234 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8235 status_str = "target add";
8237 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8238 status_str = "target remove";
8240 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8241 status_str = "delay target remove";
8243 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8244 status_str = "link rate change";
8246 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8247 status_str = "target responding";
8250 status_str = "unknown";
/* Link rates are packed in the low bits of the PortInfo fields. */
8253 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8254 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8255 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8256 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8257 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8258 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8259 handle, status_str, link_rate, prev_link_rate);
8264 * _scsih_pcie_topology_change_event - handle PCIe topology
8266 * @ioc: per adapter object
8267 * @fw_event: The fw_event_work object
/* NOTE(review): numbering gaps show elided lines (breaks, braces, `goto`
 * targets); verify against upstream mpt3sas_scsih.c. */
/* Worker-context handler for a PCIe topology-change list event: walks
 * each port entry and adds, re-checks, or removes the attached device. */
8272 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8273 struct fw_event_work *fw_event)
8278 u8 link_rate, prev_link_rate;
8279 unsigned long flags;
8281 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8282 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8283 struct _pcie_device *pcie_device;
8285 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8286 _scsih_pcie_topology_change_event_debug(ioc, event_data);
/* Bail out while the controller is resetting or being removed. */
8288 if (ioc->shost_recovery || ioc->remove_host ||
8289 ioc->pci_error_recovery)
8292 if (fw_event->ignore) {
8293 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8297 /* handle siblings events */
8298 for (i = 0; i < event_data->NumEntries; i++) {
/* Ignore flag can be set asynchronously mid-loop, so re-check it. */
8299 if (fw_event->ignore) {
8301 ioc_info(ioc, "ignoring switch event\n"));
8304 if (ioc->remove_host || ioc->pci_error_recovery)
8306 reason_code = event_data->PortEntry[i].PortStatus;
8308 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8312 link_rate = event_data->PortEntry[i].CurrentPortInfo
8313 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8314 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8315 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8317 switch (reason_code) {
8318 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8319 if (ioc->shost_recovery)
8321 if (link_rate == prev_link_rate)
8323 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8326 _scsih_pcie_check_device(ioc, handle);
8328 /* This code after this point handles the test case
8329 * where a device has been added, however its returning
8330 * BUSY for sometime. Then before the Device Missing
8331 * Delay expires and the device becomes READY, the
8332 * device is removed and added back.
/* Lookup must hold pcie_device_lock; reference dropped right after. */
8334 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8335 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8336 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8339 pcie_device_put(pcie_device);
8343 if (!test_bit(handle, ioc->pend_os_device_add))
8347 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
/* Rewrite the entry's status so it is processed as DEV_ADDED below. */
8349 event_data->PortEntry[i].PortStatus &= 0xF0;
8350 event_data->PortEntry[i].PortStatus |=
8351 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8353 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8354 if (ioc->shost_recovery)
8356 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8359 rc = _scsih_pcie_add_device(ioc, handle);
8361 /* mark entry vacant */
8362 /* TODO This needs to be reviewed and fixed,
8363 * we dont have an entry
8364 * to make an event void like vacant
8366 event_data->PortEntry[i].PortStatus |=
8367 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8370 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8371 _scsih_pcie_device_remove_by_handle(ioc, handle);
8378 * _scsih_pcie_device_status_change_event_debug - debug for device event
8380 * @event_data: event data payload
/* NOTE(review): numbering gaps show elided lines (breaks, braces);
 * verify against upstream mpt3sas_scsih.c. */
/* Logs a decoded PCIe device status-change event (reason, handle, WWID,
 * task tag; SMART events also carry ASC/ASCQ). */
8384 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8385 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8387 char *reason_str = NULL;
8389 switch (event_data->ReasonCode) {
8390 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8391 reason_str = "smart data";
8393 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8394 reason_str = "unsupported device discovered";
8396 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8397 reason_str = "internal device reset";
8399 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8400 reason_str = "internal task abort";
8402 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8403 reason_str = "internal task abort set";
8405 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8406 reason_str = "internal clear task set";
8408 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8409 reason_str = "internal query task";
8411 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8412 reason_str = "device init failure";
8414 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8415 reason_str = "internal device reset complete";
8417 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8418 reason_str = "internal task abort complete";
8420 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8421 reason_str = "internal async notification";
8423 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8424 reason_str = "pcie hot reset failed";
8427 reason_str = "unknown reason";
8431 ioc_info(ioc, "PCIE device status change: (%s)\n"
8432 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8433 reason_str, le16_to_cpu(event_data->DevHandle),
8434 (u64)le64_to_cpu(event_data->WWID),
8435 le16_to_cpu(event_data->TaskTag));
/* SMART data events additionally report sense ASC/ASCQ codes. */
8436 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8437 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8438 event_data->ASC, event_data->ASCQ);
8443 * _scsih_pcie_device_status_change_event - handle device status
8445 * @ioc: per adapter object
8446 * @fw_event: The fw_event_work object
/* NOTE(review): numbering gaps show elided lines (goto labels, braces);
 * verify against upstream mpt3sas_scsih.c. */
/* Sets/clears the target's tm_busy flag while the firmware performs an
 * internal device reset; all other reason codes are ignored here. */
8450 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8451 struct fw_event_work *fw_event)
8453 struct MPT3SAS_TARGET *target_priv_data;
8454 struct _pcie_device *pcie_device;
8456 unsigned long flags;
8457 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8458 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8459 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8460 _scsih_pcie_device_status_change_event_debug(ioc,
/* Only internal-device-reset start/complete events are acted on. */
8463 if (event_data->ReasonCode !=
8464 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8465 event_data->ReasonCode !=
8466 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
/* Device lookup and flag update are done under pcie_device_lock. */
8469 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8470 wwid = le64_to_cpu(event_data->WWID);
8471 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8473 if (!pcie_device || !pcie_device->starget)
8476 target_priv_data = pcie_device->starget->hostdata;
8477 if (!target_priv_data)
8480 if (event_data->ReasonCode ==
8481 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8482 target_priv_data->tm_busy = 1;
8484 target_priv_data->tm_busy = 0;
8487 pcie_device_put(pcie_device);
8489 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8493 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8495 * @ioc: per adapter object
8496 * @event_data: event data payload
/* NOTE(review): numbering gaps show elided lines (breaks, braces);
 * verify against upstream mpt3sas_scsih.c. */
/* Logs a decoded SAS enclosure status-change event. */
8500 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8501 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8503 char *reason_str = NULL;
8505 switch (event_data->ReasonCode) {
8506 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8507 reason_str = "enclosure add";
8509 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8510 reason_str = "enclosure remove";
8513 reason_str = "unknown reason";
8517 ioc_info(ioc, "enclosure status change: (%s)\n"
8518 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8520 le16_to_cpu(event_data->EnclosureHandle),
8521 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8522 le16_to_cpu(event_data->StartSlot))
8526 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8527 * @ioc: per adapter object
8528 * @fw_event: The fw_event_work object
/* NOTE(review): numbering gaps show elided lines (breaks, braces);
 * verify against upstream mpt3sas_scsih.c. */
/* Maintains ioc->enclosure_list: on ADD, allocates a node and reads
 * Enclosure Page 0 from config space; on NOT_RESPONDING, unlinks and
 * frees the cached node. */
8532 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8533 struct fw_event_work *fw_event)
8535 Mpi2ConfigReply_t mpi_reply;
8536 struct _enclosure_node *enclosure_dev = NULL;
8537 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8538 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8540 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8542 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8543 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8544 (Mpi2EventDataSasEnclDevStatusChange_t *)
8545 fw_event->event_data);
8546 if (ioc->shost_recovery)
/* Look for an already-cached enclosure node for this handle. */
8549 if (enclosure_handle)
8551 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8553 switch (event_data->ReasonCode) {
8554 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8555 if (!enclosure_dev) {
8557 kzalloc(sizeof(struct _enclosure_node),
8559 if (!enclosure_dev) {
8560 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8561 __FILE__, __LINE__, __func__);
8564 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8565 &enclosure_dev->pg0,
8566 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
/* Drop the node if the config read failed or IOC reported an error. */
8569 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8570 MPI2_IOCSTATUS_MASK)) {
8571 kfree(enclosure_dev);
8575 list_add_tail(&enclosure_dev->list,
8576 &ioc->enclosure_list);
8579 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8580 if (enclosure_dev) {
8581 list_del(&enclosure_dev->list);
8582 kfree(enclosure_dev);
8591 * _scsih_sas_broadcast_primitive_event - handle broadcast events
8592 * @ioc: per adapter object
8593 * @fw_event: The fw_event_work object
/* NOTE(review): numbering gaps show elided lines (declarations, braces,
 * goto labels, `break`/`continue`); verify against upstream
 * mpt3sas_scsih.c. */
/* On a broadcast primitive (AEN), walks every outstanding SCSI IO,
 * issues QUERY_TASK per command, and ABORT_TASK for IOs still queued on
 * the IOC/target.  Retries the whole scan up to 5 times (and loops back
 * if another AEN arrived meanwhile).  Serialized via tm_cmds.mutex; all
 * device IO is blocked for the duration. */
8597 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8598 struct fw_event_work *fw_event)
8600 struct scsi_cmnd *scmd;
8601 struct scsi_device *sdev;
8602 struct scsiio_tracker *st;
8605 struct MPT3SAS_DEVICE *sas_device_priv_data;
8606 u32 termination_count;
8608 Mpi2SCSITaskManagementReply_t *mpi_reply;
8609 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8610 (Mpi2EventDataSasBroadcastPrimitive_t *)
8611 fw_event->event_data;
8613 unsigned long flags;
8616 u8 task_abort_retries;
8618 mutex_lock(&ioc->tm_cmds.mutex);
8619 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8620 __func__, event_data->PhyNum, event_data->PortWidth);
8622 _scsih_block_io_all_device(ioc);
8624 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8625 mpi_reply = ioc->tm_cmds.reply;
8626 broadcast_aen_retry:
8628 /* sanity checks for retrying this loop */
8629 if (max_retries++ == 5) {
8630 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8632 } else if (max_retries > 1)
8634 ioc_info(ioc, "%s: %d retry\n",
8635 __func__, max_retries - 1));
8637 termination_count = 0;
/* Scan every SMID slot for an active SCSI IO command. */
8639 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8640 if (ioc->shost_recovery)
8642 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8645 st = scsi_cmd_priv(scmd);
8646 sdev = scmd->device;
8647 sas_device_priv_data = sdev->hostdata;
8648 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8650 /* skip hidden raid components */
8651 if (sas_device_priv_data->sas_target->flags &
8652 MPT_TARGET_FLAGS_RAID_COMPONENT)
/* skip volumes */
8655 if (sas_device_priv_data->sas_target->flags &
8656 MPT_TARGET_FLAGS_VOLUME)
8658 /* skip PCIe devices */
8659 if (sas_device_priv_data->sas_target->flags &
8660 MPT_TARGET_FLAGS_PCIE_DEVICE)
8663 handle = sas_device_priv_data->sas_target->handle;
8664 lun = sas_device_priv_data->lun;
8667 if (ioc->shost_recovery)
/* TM frames sleep, so drop the lookup lock around issue_tm(). */
8670 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8671 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8672 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8673 st->msix_io, 30, 0);
8675 sdev_printk(KERN_WARNING, sdev,
8676 "mpt3sas_scsih_issue_tm: FAILED when sending "
8677 "QUERY_TASK: scmd(%p)\n", scmd);
8678 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8679 goto broadcast_aen_retry;
8681 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8682 & MPI2_IOCSTATUS_MASK;
8683 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8684 sdev_printk(KERN_WARNING, sdev,
8685 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8687 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8688 goto broadcast_aen_retry;
8691 /* see if IO is still owned by IOC and target */
8692 if (mpi_reply->ResponseCode ==
8693 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8694 mpi_reply->ResponseCode ==
8695 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8696 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8699 task_abort_retries = 0;
/* Abort loop: give each command up to 60 abort attempts. */
8701 if (task_abort_retries++ == 60) {
8703 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8705 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8706 goto broadcast_aen_retry;
8709 if (ioc->shost_recovery)
8712 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8713 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8714 st->smid, st->msix_io, 30, 0);
/* cb_idx != 0xFF means the command is still outstanding. */
8715 if (r == FAILED || st->cb_idx != 0xFF) {
8716 sdev_printk(KERN_WARNING, sdev,
8717 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8718 "scmd(%p)\n", scmd);
8722 if (task_abort_retries > 1)
8723 sdev_printk(KERN_WARNING, sdev,
8724 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8726 task_abort_retries - 1, scmd);
8728 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8729 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
/* Another AEN arrived during processing: rescan from the top. */
8732 if (ioc->broadcast_aen_pending) {
8735 "%s: loop back due to pending AEN\n",
8737 ioc->broadcast_aen_pending = 0;
8738 goto broadcast_aen_retry;
8742 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8746 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8747 __func__, query_count, termination_count));
8749 ioc->broadcast_aen_busy = 0;
8750 if (!ioc->shost_recovery)
8751 _scsih_ublock_io_all_device(ioc);
8752 mutex_unlock(&ioc->tm_cmds.mutex);
8756 * _scsih_sas_discovery_event - handle discovery events
8757 * @ioc: per adapter object
8758 * @fw_event: The fw_event_work object
/* NOTE(review): numbering gaps show elided lines (braces, ssleep call);
 * verify against upstream mpt3sas_scsih.c. */
/* Logs discovery start/stop; on the first discovery-started event (no
 * PHYs registered yet) it triggers SAS host registration. */
8762 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8763 struct fw_event_work *fw_event)
8765 Mpi2EventDataSasDiscovery_t *event_data =
8766 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8768 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8769 ioc_info(ioc, "discovery event: (%s)",
8770 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8772 if (event_data->DiscoveryStatus)
8773 pr_cont("discovery_status(0x%08x)",
8774 le32_to_cpu(event_data->DiscoveryStatus));
/* First discovery with no PHYs yet: register the SAS host now. */
8778 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8779 !ioc->sas_hba.num_phys) {
8780 if (disable_discovery > 0 && ioc->shost_recovery) {
8781 /* Wait for the reset to complete */
8782 while (ioc->shost_recovery)
8785 _scsih_sas_host_add(ioc);
8790 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8792 * @ioc: per adapter object
8793 * @fw_event: The fw_event_work object
/* NOTE(review): numbering gaps show elided lines (breaks, braces);
 * verify against upstream mpt3sas_scsih.c. */
/* Warn-logs SMP failure/timeout discovery errors for an expander. */
8797 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8798 struct fw_event_work *fw_event)
8800 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8801 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8803 switch (event_data->ReasonCode) {
8804 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8805 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8806 le16_to_cpu(event_data->DevHandle),
8807 (u64)le64_to_cpu(event_data->SASAddress),
8808 event_data->PhysicalPort);
8810 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8811 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8812 le16_to_cpu(event_data->DevHandle),
8813 (u64)le64_to_cpu(event_data->SASAddress),
8814 event_data->PhysicalPort);
8822 * _scsih_pcie_enumeration_event - handle enumeration events
8823 * @ioc: per adapter object
8824 * @fw_event: The fw_event_work object
/* NOTE(review): numbering gaps show elided lines; verify against
 * upstream mpt3sas_scsih.c. */
/* Debug-only: logs PCIe enumeration start/complete and its status. */
8828 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8829 struct fw_event_work *fw_event)
8831 Mpi26EventDataPCIeEnumeration_t *event_data =
8832 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
/* Nothing to do unless event-work debug logging is enabled. */
8834 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8837 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8838 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8839 "started" : "completed",
8841 if (event_data->EnumerationStatus)
8842 pr_cont("enumeration_status(0x%08x)",
8843 le32_to_cpu(event_data->EnumerationStatus));
8848 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8849 * @ioc: per adapter object
8850 * @handle: device handle for physical disk
8851 * @phys_disk_num: physical disk number
8853 * Return: 0 for success, else failure.
/* NOTE(review): numbering gaps show elided lines (declarations, braces,
 * goto labels, return statements); verify against upstream
 * mpt3sas_scsih.c. */
/* Sends a RAID_ACTION (PHYSDISK_HIDDEN) request to enable fast path on
 * an IR physical disk.  Serialized via scsih_cmds.mutex; issues a hard
 * reset if the command times out without completing. */
8856 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8858 Mpi2RaidActionRequest_t *mpi_request;
8859 Mpi2RaidActionReply_t *mpi_reply;
/* Not applicable on MPI2 (SAS2) generation controllers. */
8866 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8869 mutex_lock(&ioc->scsih_cmds.mutex);
8871 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8872 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8876 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8878 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8880 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8881 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8886 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8887 ioc->scsih_cmds.smid = smid;
8888 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8890 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8891 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8892 mpi_request->PhysDiskNum = phys_disk_num;
8895 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8896 handle, phys_disk_num));
8898 init_completion(&ioc->scsih_cmds.done);
8899 ioc->put_smid_default(ioc, smid);
8900 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
/* Timed out without a completion: decide whether to hard-reset. */
8902 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8903 mpt3sas_check_cmd_timeout(ioc,
8904 ioc->scsih_cmds.status, mpi_request,
8905 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8910 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8912 mpi_reply = ioc->scsih_cmds.reply;
8913 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8914 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8915 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
8918 ioc_status &= MPI2_IOCSTATUS_MASK;
8919 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8921 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8922 ioc_status, log_info));
8926 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
8930 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8931 mutex_unlock(&ioc->scsih_cmds.mutex);
8934 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8939 * _scsih_reprobe_lun - reprobing lun
8940 * @sdev: scsi device struct
8941 * @no_uld_attach: sdev->no_uld_attach flag setting
/* Callback for starget_for_each_device(): toggles upper-level driver
 * attachment (hide/expose a RAID component) and forces a reprobe. */
8945 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8947 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8948 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8949 sdev->no_uld_attach ? "hiding" : "exposing");
/* Reprobe failure is unexpected; warn loudly rather than ignore it. */
8950 WARN_ON(scsi_device_reprobe(sdev));
8954 * _scsih_sas_volume_add - add new volume
8955 * @ioc: per adapter object
8956 * @element: IR config element data
/* NOTE(review): numbering gaps show elided lines (return paths, braces);
 * verify against upstream mpt3sas_scsih.c. */
/* Creates a _raid_device entry for a newly-reported IR volume and, when
 * discovery is already complete, registers it with the SCSI midlayer. */
8960 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8961 Mpi2EventIrConfigElement_t *element)
8963 struct _raid_device *raid_device;
8964 unsigned long flags;
8966 u16 handle = le16_to_cpu(element->VolDevHandle);
8969 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8971 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8972 __FILE__, __LINE__, __func__);
/* Skip if a device with this WWID is already tracked. */
8976 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8977 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8978 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8983 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8985 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8986 __FILE__, __LINE__, __func__);
8990 raid_device->id = ioc->sas_id++;
8991 raid_device->channel = RAID_CHANNEL;
8992 raid_device->handle = handle;
8993 raid_device->wwid = wwid;
8994 _scsih_raid_device_add(ioc, raid_device);
8995 if (!ioc->wait_for_discovery_to_complete) {
8996 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8997 raid_device->id, 0);
8999 _scsih_raid_device_remove(ioc, raid_device);
/* During initial discovery, only evaluate as a boot device. */
9001 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9002 _scsih_determine_boot_device(ioc, raid_device, 1);
9003 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9008 * _scsih_sas_volume_delete - delete volume
9009 * @ioc: per adapter object
9010 * @handle: volume device handle
/* NOTE(review): numbering gaps show elided lines (braces, kfree);
 * verify against upstream mpt3sas_scsih.c. */
/* Unlinks the _raid_device for @handle, marks its target deleted, and
 * removes the SCSI target outside the raid_device_lock. */
9014 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
9016 struct _raid_device *raid_device;
9017 unsigned long flags;
9018 struct MPT3SAS_TARGET *sas_target_priv_data;
9019 struct scsi_target *starget = NULL;
9021 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9022 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9024 if (raid_device->starget) {
9025 starget = raid_device->starget;
9026 sas_target_priv_data = starget->hostdata;
/* Flag the target so in-flight IO paths see it as gone. */
9027 sas_target_priv_data->deleted = 1;
9029 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9030 raid_device->handle, (u64)raid_device->wwid);
9031 list_del(&raid_device->list);
9034 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* scsi_remove_target() may sleep, so it runs after the unlock. */
9036 scsi_remove_target(&starget->dev);
9040 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9041 * @ioc: per adapter object
9042 * @element: IR config element data
/* NOTE(review): numbering gaps show elided lines (braces, early return);
 * verify against upstream mpt3sas_scsih.c. */
/* Clears the RAID-component status of a physical disk and reprobes its
 * LUNs so the upper-level drivers re-attach (device becomes /dev/sdX). */
9046 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9047 Mpi2EventIrConfigElement_t *element)
9049 struct _sas_device *sas_device;
9050 struct scsi_target *starget = NULL;
9051 struct MPT3SAS_TARGET *sas_target_priv_data;
9052 unsigned long flags;
9053 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9055 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9056 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9058 sas_device->volume_handle = 0;
9059 sas_device->volume_wwid = 0;
/* No longer a hidden physical disk: drop it from pd_handles. */
9060 clear_bit(handle, ioc->pd_handles);
9061 if (sas_device->starget && sas_device->starget->hostdata) {
9062 starget = sas_device->starget;
9063 sas_target_priv_data = starget->hostdata;
9064 sas_target_priv_data->flags &=
9065 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9068 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9072 /* exposing raid component */
/* NULL arg -> _scsih_reprobe_lun clears no_uld_attach (expose). */
9074 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9076 sas_device_put(sas_device);
9080 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9081 * @ioc: per adapter object
9082 * @element: IR config element data
/* NOTE(review): numbering gaps show elided lines (braces, early return);
 * verify against upstream mpt3sas_scsih.c. */
/* Marks a physical disk as a RAID component (recording its owning
 * volume), enables fast path for it, and reprobes its LUNs so the
 * upper-level drivers detach (device disappears from /dev/sdX). */
9086 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9087 Mpi2EventIrConfigElement_t *element)
9089 struct _sas_device *sas_device;
9090 struct scsi_target *starget = NULL;
9091 struct MPT3SAS_TARGET *sas_target_priv_data;
9092 unsigned long flags;
9093 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9094 u16 volume_handle = 0;
9095 u64 volume_wwid = 0;
/* Resolve the owning volume before taking the device lock. */
9097 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9099 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9102 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9103 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9105 set_bit(handle, ioc->pd_handles);
9106 if (sas_device->starget && sas_device->starget->hostdata) {
9107 starget = sas_device->starget;
9108 sas_target_priv_data = starget->hostdata;
9109 sas_target_priv_data->flags |=
9110 MPT_TARGET_FLAGS_RAID_COMPONENT;
9111 sas_device->volume_handle = volume_handle;
9112 sas_device->volume_wwid = volume_wwid;
9115 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9119 /* hiding raid component */
9120 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
/* Non-NULL arg -> _scsih_reprobe_lun sets no_uld_attach (hide). */
9123 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9125 sas_device_put(sas_device);
9129 * _scsih_sas_pd_delete - delete pd component
9130 * @ioc: per adapter object
9131 * @element: IR config element data
/* Removes the physical disk identified by the element's handle. */
9135 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9136 Mpi2EventIrConfigElement_t *element)
9138 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9140 _scsih_device_remove_by_handle(ioc, handle);
9144 * _scsih_sas_pd_add - remove pd component
/* NOTE(review): summary line above says "remove" but the function adds a
 * pd component — looks like a stale kernel-doc line; confirm upstream. */
9145 * @ioc: per adapter object
9146 * @element: IR config element data
/* NOTE(review): numbering gaps show elided lines (declarations, returns,
 * braces); verify against upstream mpt3sas_scsih.c. */
/* Marks a physical disk as hidden (pd_handles) and enables fast path;
 * if the device is not yet known, reads SAS Device Page 0, updates the
 * transport links, then adds the device hidden from the OS. */
9150 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9151 Mpi2EventIrConfigElement_t *element)
9153 struct _sas_device *sas_device;
9154 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9155 Mpi2ConfigReply_t mpi_reply;
9156 Mpi2SasDevicePage0_t sas_device_pg0;
9161 set_bit(handle, ioc->pd_handles);
/* Already tracked: just turn on fast path and drop the reference. */
9163 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9165 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9166 sas_device_put(sas_device);
9170 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9171 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9172 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9173 __FILE__, __LINE__, __func__);
9177 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9178 MPI2_IOCSTATUS_MASK;
9179 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9180 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9181 __FILE__, __LINE__, __func__);
9185 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9186 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9187 mpt3sas_transport_update_links(ioc, sas_address, handle,
9188 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9189 mpt3sas_get_port_by_id(ioc,
9190 sas_device_pg0.PhysicalPort, 0));
9192 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
/* is_pd=1: add the device hidden from the OS (RAID component). */
9193 _scsih_add_device(ioc, handle, 0, 1);
9197 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9198 * @ioc: per adapter object
9199 * @event_data: event data payload
/* NOTE(review): numbering gaps show elided lines (breaks, braces);
 * verify against upstream mpt3sas_scsih.c. */
/* Logs a decoded IR config-change list: foreign/native flag, then one
 * line per element with reason, element type, and handles. */
9203 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9204 Mpi2EventDataIrConfigChangeList_t *event_data)
9206 Mpi2EventIrConfigElement_t *element;
9209 char *reason_str = NULL, *element_str = NULL;
9211 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9213 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9214 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9215 "foreign" : "native",
9216 event_data->NumElements);
9217 for (i = 0; i < event_data->NumElements; i++, element++) {
9218 switch (element->ReasonCode) {
9219 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9222 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9223 reason_str = "remove";
9225 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9226 reason_str = "no change";
9228 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9229 reason_str = "hide";
9231 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9232 reason_str = "unhide";
9234 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9235 reason_str = "volume_created";
9237 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9238 reason_str = "volume_deleted";
9240 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9241 reason_str = "pd_created";
9243 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9244 reason_str = "pd_deleted";
9247 reason_str = "unknown reason";
/* Element type lives in the low bits of ElementFlags. */
9250 element_type = le16_to_cpu(element->ElementFlags) &
9251 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9252 switch (element_type) {
9253 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9254 element_str = "volume";
9256 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9257 element_str = "phys disk";
9259 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9260 element_str = "hot spare";
9263 element_str = "unknown element";
9266 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9267 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9268 reason_str, le16_to_cpu(element->VolDevHandle),
9269 le16_to_cpu(element->PhysDiskDevHandle),
9270 element->PhysDiskNum);
9275 * _scsih_sas_ir_config_change_event - handle ir configuration change events
9276 * @ioc: per adapter object
9277 * @fw_event: The fw_event_work object
/* NOTE(review): numbering gaps show elided lines (breaks, braces,
 * return); verify against upstream mpt3sas_scsih.c. */
/* Dispatches each IR config-change element to the matching volume/pd
 * add, delete, hide, or expose helper. */
9281 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9282 struct fw_event_work *fw_event)
9284 Mpi2EventIrConfigElement_t *element;
9287 Mpi2EventDataIrConfigChangeList_t *event_data =
9288 (Mpi2EventDataIrConfigChangeList_t *)
9289 fw_event->event_data;
9291 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9292 (!ioc->hide_ir_msg))
9293 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9295 foreign_config = (le32_to_cpu(event_data->Flags) &
9296 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9298 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
/* During recovery (MPI2.5+/SAS3), only re-enable fast path on hides. */
9299 if (ioc->shost_recovery &&
9300 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9301 for (i = 0; i < event_data->NumElements; i++, element++) {
9302 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9303 _scsih_ir_fastpath(ioc,
9304 le16_to_cpu(element->PhysDiskDevHandle),
9305 element->PhysDiskNum);
9310 for (i = 0; i < event_data->NumElements; i++, element++) {
9312 switch (element->ReasonCode) {
/* Foreign configs are not surfaced as volumes. */
9313 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9314 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9315 if (!foreign_config)
9316 _scsih_sas_volume_add(ioc, element);
9318 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9319 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9320 if (!foreign_config)
9321 _scsih_sas_volume_delete(ioc,
9322 le16_to_cpu(element->VolDevHandle));
/* WarpDrive keeps pds exposed, so hide/expose are skipped there. */
9324 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9325 if (!ioc->is_warpdrive)
9326 _scsih_sas_pd_hide(ioc, element);
9328 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9329 if (!ioc->is_warpdrive)
9330 _scsih_sas_pd_expose(ioc, element);
9332 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9333 if (!ioc->is_warpdrive)
9334 _scsih_sas_pd_add(ioc, element);
9336 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9337 if (!ioc->is_warpdrive)
9338 _scsih_sas_pd_delete(ioc, element);
9345 * _scsih_sas_ir_volume_event - IR volume event
9346 * @ioc: per adapter object
9347 * @fw_event: The fw_event_work object
/*
 * Worker for MPI2_EVENT_IR_VOLUME state-change events: delete the volume on
 * MISSING/FAILED, and create/register it with the SCSI midlayer on
 * ONLINE/DEGRADED/OPTIMAL if it is not already known.
 * NOTE(review): this extract is missing intervening source lines (braces,
 * breaks, early returns); comments describe only what is visible here.
 */
9351 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9352 struct fw_event_work *fw_event)
9355 unsigned long flags;
9356 struct _raid_device *raid_device;
9360 Mpi2EventDataIrVolume_t *event_data =
9361 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
/* Ignore events while host reset/recovery is in progress. */
9363 if (ioc->shost_recovery)
/* Only STATE_CHANGED reason codes are handled here. */
9366 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9369 handle = le16_to_cpu(event_data->VolDevHandle);
9370 state = le32_to_cpu(event_data->NewValue);
9371 if (!ioc->hide_ir_msg)
9373 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9375 le32_to_cpu(event_data->PreviousValue),
9378 case MPI2_RAID_VOL_STATE_MISSING:
9379 case MPI2_RAID_VOL_STATE_FAILED:
9380 _scsih_sas_volume_delete(ioc, handle);
9383 case MPI2_RAID_VOL_STATE_ONLINE:
9384 case MPI2_RAID_VOL_STATE_DEGRADED:
9385 case MPI2_RAID_VOL_STATE_OPTIMAL:
/* Look up the volume under the lock; creation below only runs when the
 * lookup failed (the branch lines are absent from this extract — confirm). */
9387 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9388 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9389 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9394 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9396 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9397 __FILE__, __LINE__, __func__);
9401 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9403 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9404 __FILE__, __LINE__, __func__);
/* Populate the new volume entry and expose it via scsi_add_device(). */
9408 raid_device->id = ioc->sas_id++;
9409 raid_device->channel = RAID_CHANNEL;
9410 raid_device->handle = handle;
9411 raid_device->wwid = wwid;
9412 _scsih_raid_device_add(ioc, raid_device);
9413 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9414 raid_device->id, 0);
/* On scsi_add_device() failure, back the entry out again. */
9416 _scsih_raid_device_remove(ioc, raid_device);
9419 case MPI2_RAID_VOL_STATE_INITIALIZING:
9426 * _scsih_sas_ir_physical_disk_event - PD event
9427 * @ioc: per adapter object
9428 * @fw_event: The fw_event_work object
/*
 * Worker for MPI2_EVENT_IR_PHYSICAL_DISK state-change events: when a PD
 * enters an active RAID state, record it in the pd_handles bitmap and, if the
 * device is not yet known, refresh links and add it (hidden) to the topology.
 * NOTE(review): this extract is missing intervening source lines; comments
 * describe only what is visible here.
 */
9432 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9433 struct fw_event_work *fw_event)
9435 u16 handle, parent_handle;
9437 struct _sas_device *sas_device;
9438 Mpi2ConfigReply_t mpi_reply;
9439 Mpi2SasDevicePage0_t sas_device_pg0;
9441 Mpi2EventDataIrPhysicalDisk_t *event_data =
9442 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
/* Ignore events during host reset/recovery. */
9445 if (ioc->shost_recovery)
9448 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9451 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9452 state = le32_to_cpu(event_data->NewValue);
9454 if (!ioc->hide_ir_msg)
9456 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9458 le32_to_cpu(event_data->PreviousValue),
9462 case MPI2_RAID_PD_STATE_ONLINE:
9463 case MPI2_RAID_PD_STATE_DEGRADED:
9464 case MPI2_RAID_PD_STATE_REBUILDING:
9465 case MPI2_RAID_PD_STATE_OPTIMAL:
9466 case MPI2_RAID_PD_STATE_HOT_SPARE:
/* Track the handle as a RAID member (except on WarpDrive). */
9468 if (!ioc->is_warpdrive)
9469 set_bit(handle, ioc->pd_handles);
/* Already discovered? Drop the reference and stop (remaining control
 * flow lines are absent from this extract). */
9471 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9473 sas_device_put(sas_device);
/* Read SAS Device Page 0 for the PD so links can be refreshed. */
9477 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9478 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9480 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9481 __FILE__, __LINE__, __func__);
9485 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9486 MPI2_IOCSTATUS_MASK;
9487 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9488 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9489 __FILE__, __LINE__, __func__);
9493 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9494 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9495 mpt3sas_transport_update_links(ioc, sas_address, handle,
9496 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9497 mpt3sas_get_port_by_id(ioc,
9498 sas_device_pg0.PhysicalPort, 0));
/* Add as hidden (is_pd=1) end device. */
9500 _scsih_add_device(ioc, handle, 0, 1);
/* Inactive PD states: no action visible in this extract. */
9504 case MPI2_RAID_PD_STATE_OFFLINE:
9505 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9506 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
9513 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9514 * @ioc: per adapter object
9515 * @event_data: event data payload
/*
 * Debug helper for MPI2_EVENT_IR_OPERATION_STATUS: translate the
 * RAIDOperation code into a human-readable string and log the volume
 * handle plus percent-complete.
 * NOTE(review): this extract is missing the break statements between cases.
 */
9519 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9520 Mpi2EventDataIrOperationStatus_t *event_data)
9522 char *reason_str = NULL;
9524 switch (event_data->RAIDOperation) {
9525 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9526 reason_str = "resync";
9528 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9529 reason_str = "online capacity expansion";
9531 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9532 reason_str = "consistency check";
9534 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9535 reason_str = "background init";
9537 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9538 reason_str = "make data consistent";
9545 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9547 le16_to_cpu(event_data->VolDevHandle),
9548 event_data->PercentComplete);
9552 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9553 * @ioc: per adapter object
9554 * @fw_event: The fw_event_work object
/*
 * Worker for MPI2_EVENT_IR_OPERATION_STATUS: optionally log the event, and
 * for RESYNC operations cache the percent-complete on the matching
 * raid_device (used by the raid transport class).
 * NOTE(review): this extract is missing intervening source lines.
 */
9558 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9559 struct fw_event_work *fw_event)
9561 Mpi2EventDataIrOperationStatus_t *event_data =
9562 (Mpi2EventDataIrOperationStatus_t *)
9563 fw_event->event_data;
/*
 * NOTE(review): 'static' makes this local pointer shared across calls.
 * It is reassigned under raid_device_lock before use below, but confirm
 * the qualifier is intentional rather than a historical accident.
 */
9564 static struct _raid_device *raid_device;
9565 unsigned long flags;
9568 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9569 (!ioc->hide_ir_msg))
9570 _scsih_sas_ir_operation_status_event_debug(ioc,
9573 /* code added for raid transport support */
9574 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9576 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9577 handle = le16_to_cpu(event_data->VolDevHandle);
9578 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
/* Cache progress only when the volume lookup succeeded. */
9580 raid_device->percent_complete =
9581 event_data->PercentComplete;
9582 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9587 * _scsih_prep_device_scan - initialize parameters prior to device scan
9588 * @ioc: per adapter object
9590 * Set the deleted flag prior to device scan. If the device is found during
9591 * the scan, then we clear the deleted flag.
/*
 * Flag every attached target as "deleted" prior to a post-reset device scan;
 * targets rediscovered during the scan clear the flag again, leaving only
 * truly missing devices marked for removal.
 */
9594 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9596 struct MPT3SAS_DEVICE *sas_device_priv_data;
9597 struct scsi_device *sdev;
/* Walk all sdevs on this host and mark their targets deleted. */
9599 shost_for_each_device(sdev, ioc->shost) {
9600 sas_device_priv_data = sdev->hostdata;
9601 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9602 sas_device_priv_data->sas_target->deleted = 1;
9607 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
9608 * @ioc: per adapter object
9609 * @sas_device_pg0: SAS Device page 0
9611 * After host reset, find out whether devices are still responding.
9612 * Used in _scsih_remove_unresponsive_sas_devices.
/*
 * After host reset: match a firmware-reported SAS Device Page 0 against the
 * driver's sas_device_list (by SAS address, slot and port) and mark the entry
 * responding. Also refreshes enclosure data and the device handle, which may
 * have changed across the reset.
 * NOTE(review): this extract is missing intervening source lines (braces,
 * continues, else branches); comments describe only what is visible here.
 */
9615 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9616 Mpi2SasDevicePage0_t *sas_device_pg0)
9618 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9619 struct scsi_target *starget;
9620 struct _sas_device *sas_device = NULL;
9621 struct _enclosure_node *enclosure_dev = NULL;
9622 unsigned long flags;
9623 struct hba_port *port = mpt3sas_get_port_by_id(
9624 ioc, sas_device_pg0->PhysicalPort, 0);
/* Resolve the enclosure node for this device, if any was reported. */
9626 if (sas_device_pg0->EnclosureHandle) {
9628 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9629 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9630 if (enclosure_dev == NULL)
9631 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9632 sas_device_pg0->EnclosureHandle);
9634 spin_lock_irqsave(&ioc->sas_device_lock, flags);
/* Match on SAS address, slot and hba_port; non-matches are skipped. */
9635 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
9636 if (sas_device->sas_address != le64_to_cpu(
9637 sas_device_pg0->SASAddress))
9639 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9641 if (sas_device->port != port)
/* Found: mark responding and clear stale target flags. */
9643 sas_device->responding = 1;
9644 starget = sas_device->starget;
9645 if (starget && starget->hostdata) {
9646 sas_target_priv_data = starget->hostdata;
9647 sas_target_priv_data->tm_busy = 0;
9648 sas_target_priv_data->deleted = 0;
9650 sas_target_priv_data = NULL;
9652 starget_printk(KERN_INFO, starget,
9653 "handle(0x%04x), sas_addr(0x%016llx)\n",
9654 le16_to_cpu(sas_device_pg0->DevHandle),
9655 (unsigned long long)
9656 sas_device->sas_address);
9658 if (sas_device->enclosure_handle != 0)
9659 starget_printk(KERN_INFO, starget,
9660 "enclosure logical id(0x%016llx), slot(%d)\n",
9661 (unsigned long long)
9662 sas_device->enclosure_logical_id,
/* Refresh enclosure level / connector name from the new page 0. */
9665 if (le16_to_cpu(sas_device_pg0->Flags) &
9666 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9667 sas_device->enclosure_level =
9668 sas_device_pg0->EnclosureLevel;
9669 memcpy(&sas_device->connector_name[0],
9670 &sas_device_pg0->ConnectorName[0], 4);
9672 sas_device->enclosure_level = 0;
9673 sas_device->connector_name[0] = '\0';
9676 sas_device->enclosure_handle =
9677 le16_to_cpu(sas_device_pg0->EnclosureHandle);
9678 sas_device->is_chassis_slot_valid = 0;
9679 if (enclosure_dev) {
9680 sas_device->enclosure_logical_id = le64_to_cpu(
9681 enclosure_dev->pg0.EnclosureLogicalID);
9682 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9683 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9684 sas_device->is_chassis_slot_valid = 1;
9685 sas_device->chassis_slot =
9686 enclosure_dev->pg0.ChassisSlot;
/* Handle unchanged: nothing more to update; otherwise re-record it. */
9690 if (sas_device->handle == le16_to_cpu(
9691 sas_device_pg0->DevHandle))
9693 pr_info("\thandle changed from(0x%04x)!!!\n",
9694 sas_device->handle);
9695 sas_device->handle = le16_to_cpu(
9696 sas_device_pg0->DevHandle);
9697 if (sas_target_priv_data)
9698 sas_target_priv_data->handle =
9699 le16_to_cpu(sas_device_pg0->DevHandle);
9703 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9707 * _scsih_create_enclosure_list_after_reset - Free Existing list,
9708 * And create enclosure list by scanning all Enclosure Page(0)s
9709 * @ioc: per adapter object
/*
 * After host reset: discard the cached enclosure list and rebuild it by
 * iterating Enclosure Page 0 with GET_NEXT_HANDLE until the firmware
 * reports no more enclosures.
 * NOTE(review): this extract is missing intervening source lines (loop
 * braces, break/return paths); comments describe only what is visible here.
 */
9712 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9714 struct _enclosure_node *enclosure_dev;
9715 Mpi2ConfigReply_t mpi_reply;
9716 u16 enclosure_handle;
9719 /* Free existing enclosure list */
9720 mpt3sas_free_enclosure_list(ioc);
9722 /* Re constructing enclosure list after reset*/
9723 enclosure_handle = 0xFFFF;
9726 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9727 if (!enclosure_dev) {
9728 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9729 __FILE__, __LINE__, __func__);
9732 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9733 &enclosure_dev->pg0,
9734 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
/* Config-read failure or bad IOCStatus ends the rebuild; the freshly
 * allocated node is released rather than linked in. */
9737 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9738 MPI2_IOCSTATUS_MASK)) {
9739 kfree(enclosure_dev);
9742 list_add_tail(&enclosure_dev->list,
9743 &ioc->enclosure_list);
/* Advance the GET_NEXT_HANDLE cursor to the handle just read. */
9745 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9750 * _scsih_search_responding_sas_devices - mark still-responding SAS end-devices
9751 * @ioc: per adapter object
9753 * After host reset, find out whether devices are still responding.
/*
 * After host reset: iterate SAS Device Page 0 with GET_NEXT_HANDLE and mark
 * each end-device that is still present as responding (see
 * _scsih_mark_responding_sas_device). Non end-devices are skipped.
 * NOTE(review): this extract is missing intervening source lines.
 */
9757 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9759 Mpi2SasDevicePage0_t sas_device_pg0;
9760 Mpi2ConfigReply_t mpi_reply;
9765 ioc_info(ioc, "search for end-devices: start\n");
/* Nothing to match against if the driver knows of no SAS devices. */
9767 if (list_empty(&ioc->sas_device_list))
9771 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9772 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9774 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9775 MPI2_IOCSTATUS_MASK;
9776 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9778 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9779 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9780 if (!(_scsih_is_end_device(device_info)))
9782 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9786 ioc_info(ioc, "search for end-devices: complete\n");
9790 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9791 * @ioc: per adapter object
9792 * @pcie_device_pg0: PCIe Device page 0
9794 * After host reset, find out whether devices are still responding.
9795 * Used in _scsih_remove_unresponding_devices.
/*
 * After host reset: match a firmware-reported PCIe Device Page 0 against the
 * driver's pcie_device_list (by WWID and slot) and mark the entry responding,
 * refreshing access status, enclosure data and a possibly-changed handle.
 * NOTE(review): this extract is missing intervening source lines (braces,
 * else branches); comments describe only what is visible here.
 */
9798 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9799 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9801 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9802 struct scsi_target *starget;
9803 struct _pcie_device *pcie_device;
9804 unsigned long flags;
9806 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
/* Match on WWID + slot. */
9807 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
9808 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9809 && (pcie_device->slot == le16_to_cpu(
9810 pcie_device_pg0->Slot))) {
9811 pcie_device->access_status =
9812 pcie_device_pg0->AccessStatus;
9813 pcie_device->responding = 1;
9814 starget = pcie_device->starget;
/* Clear stale target-level flags on the surviving device. */
9815 if (starget && starget->hostdata) {
9816 sas_target_priv_data = starget->hostdata;
9817 sas_target_priv_data->tm_busy = 0;
9818 sas_target_priv_data->deleted = 0;
9820 sas_target_priv_data = NULL;
9822 starget_printk(KERN_INFO, starget,
9823 "handle(0x%04x), wwid(0x%016llx) ",
9824 pcie_device->handle,
9825 (unsigned long long)pcie_device->wwid);
9826 if (pcie_device->enclosure_handle != 0)
9827 starget_printk(KERN_INFO, starget,
9828 "enclosure logical id(0x%016llx), "
9830 (unsigned long long)
9831 pcie_device->enclosure_logical_id,
/* Enclosure level/connector data is valid on gen3+ parts only. */
9835 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9836 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9837 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9838 pcie_device->enclosure_level =
9839 pcie_device_pg0->EnclosureLevel;
9840 memcpy(&pcie_device->connector_name[0],
9841 &pcie_device_pg0->ConnectorName[0], 4);
9843 pcie_device->enclosure_level = 0;
9844 pcie_device->connector_name[0] = '\0';
/* Handle unchanged: done; otherwise record the new handle. */
9847 if (pcie_device->handle == le16_to_cpu(
9848 pcie_device_pg0->DevHandle))
9850 pr_info("\thandle changed from(0x%04x)!!!\n",
9851 pcie_device->handle);
9852 pcie_device->handle = le16_to_cpu(
9853 pcie_device_pg0->DevHandle);
9854 if (sas_target_priv_data)
9855 sas_target_priv_data->handle =
9856 le16_to_cpu(pcie_device_pg0->DevHandle);
9862 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9866 * _scsih_search_responding_pcie_devices - mark still-responding PCIe end-devices
9867 * @ioc: per adapter object
9869 * After host reset, find out whether devices are still responding.
/*
 * After host reset: iterate PCIe Device Page 0 with GET_NEXT_HANDLE and mark
 * each NVMe/PCIe-SCSI end-device still present as responding (see
 * _scsih_mark_responding_pcie_device).
 * NOTE(review): this extract is missing intervening source lines.
 */
9873 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9875 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9876 Mpi2ConfigReply_t mpi_reply;
9881 ioc_info(ioc, "search for end-devices: start\n");
/* Nothing to match against if the driver knows of no PCIe devices. */
9883 if (list_empty(&ioc->pcie_device_list))
9887 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9888 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9890 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9891 MPI2_IOCSTATUS_MASK;
9892 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9893 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9894 __func__, ioc_status,
9895 le32_to_cpu(mpi_reply.IOCLogInfo));
9898 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9899 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
/* Skip anything that is not an NVMe / PCIe-SCSI end device. */
9900 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9902 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9905 ioc_info(ioc, "search for PCIe end-devices: complete\n");
9909 * _scsih_mark_responding_raid_device - mark a raid_device as responding
9910 * @ioc: per adapter object
9911 * @wwid: world wide identifier for raid volume
9912 * @handle: device handle
9914 * After host reset, find out whether devices are still responding.
9915 * Used in _scsih_remove_unresponsive_raid_devices.
/*
 * After host reset: find the raid_device matching @wwid, mark it responding,
 * re-init WarpDrive direct-IO properties (PD handles may have changed), and
 * update the cached volume handle if it changed across the reset.
 * NOTE(review): this extract is missing intervening source lines; note that
 * raid_device_lock is dropped around the WarpDrive re-init call.
 */
9918 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
9921 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9922 struct scsi_target *starget;
9923 struct _raid_device *raid_device;
9924 unsigned long flags;
9926 spin_lock_irqsave(&ioc->raid_device_lock, flags);
/* Match on WWID; only volumes already registered with a starget count. */
9927 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
9928 if (raid_device->wwid == wwid && raid_device->starget) {
9929 starget = raid_device->starget;
9930 if (starget && starget->hostdata) {
9931 sas_target_priv_data = starget->hostdata;
9932 sas_target_priv_data->deleted = 0;
9934 sas_target_priv_data = NULL;
9935 raid_device->responding = 1;
/* Drop the lock for the (sleeping) WarpDrive property refresh. */
9936 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9937 starget_printk(KERN_INFO, raid_device->starget,
9938 "handle(0x%04x), wwid(0x%016llx)\n", handle,
9939 (unsigned long long)raid_device->wwid);
9942 * WARPDRIVE: The handles of the PDs might have changed
9943 * across the host reset so re-initialize the
9944 * required data for Direct IO
9946 mpt3sas_init_warpdrive_properties(ioc, raid_device);
9947 spin_lock_irqsave(&ioc->raid_device_lock, flags);
/* Handle unchanged: release the lock and finish. */
9948 if (raid_device->handle == handle) {
9949 spin_unlock_irqrestore(&ioc->raid_device_lock,
9953 pr_info("\thandle changed from(0x%04x)!!!\n",
9954 raid_device->handle);
9955 raid_device->handle = handle;
9956 if (sas_target_priv_data)
9957 sas_target_priv_data->handle = handle;
9958 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9962 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9966 * _scsih_search_responding_raid_devices - mark still-responding RAID volumes
9967 * @ioc: per adapter object
9969 * After host reset, find out whether devices are still responding.
/*
 * After host reset (IR firmware only): walk RAID Volume Page 1 with
 * GET_NEXT_HANDLE, mark healthy volumes (OPTIMAL/ONLINE/DEGRADED) as
 * responding, then rebuild the pd_handles bitmap from Phys Disk Page 0.
 * NOTE(review): this extract is missing intervening source lines (braces,
 * breaks); comments describe only what is visible here.
 */
9973 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9975 Mpi2RaidVolPage1_t volume_pg1;
9976 Mpi2RaidVolPage0_t volume_pg0;
9977 Mpi2RaidPhysDiskPage0_t pd_pg0;
9978 Mpi2ConfigReply_t mpi_reply;
/* No IR firmware: nothing RAID-related to refresh. */
9983 if (!ioc->ir_firmware)
9986 ioc_info(ioc, "search for raid volumes: start\n");
9988 if (list_empty(&ioc->raid_device_list))
9992 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9993 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9994 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9995 MPI2_IOCSTATUS_MASK;
9996 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9998 handle = le16_to_cpu(volume_pg1.DevHandle);
/* Need Volume Page 0 to learn the current volume state. */
10000 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10001 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10002 sizeof(Mpi2RaidVolPage0_t)))
10005 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10006 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10007 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10008 _scsih_mark_responding_raid_device(ioc,
10009 le64_to_cpu(volume_pg1.WWID), handle);
10012 /* refresh the pd_handles */
10013 if (!ioc->is_warpdrive) {
10014 phys_disk_num = 0xFF;
10015 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10016 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10017 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10019 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10020 MPI2_IOCSTATUS_MASK;
10021 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10023 phys_disk_num = pd_pg0.PhysDiskNum;
10024 handle = le16_to_cpu(pd_pg0.DevHandle);
10025 set_bit(handle, ioc->pd_handles);
10029 ioc_info(ioc, "search for responding raid volumes: complete\n");
10033 * _scsih_mark_responding_expander - mark a expander as responding
10034 * @ioc: per adapter object
10035 * @expander_pg0:SAS Expander Config Page0
10037 * After host reset, find out whether devices are still responding.
10038 * Used in _scsih_remove_unresponsive_expanders.
/*
 * After host reset: match a firmware-reported Expander Page 0 against
 * sas_expander_list (by SAS address and port), mark the expander responding,
 * refresh enclosure info, and propagate a changed handle into all phys.
 * NOTE(review): this extract is missing intervening source lines (continues,
 * braces); comments describe only what is visible here.
 */
10041 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10042 Mpi2ExpanderPage0_t *expander_pg0)
10044 struct _sas_node *sas_expander = NULL;
10045 unsigned long flags;
10047 struct _enclosure_node *enclosure_dev = NULL;
10048 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10049 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10050 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10051 struct hba_port *port = mpt3sas_get_port_by_id(
10052 ioc, expander_pg0->PhysicalPort, 0);
/* Look up cached enclosure data for this expander, if any. */
10054 if (enclosure_handle)
10056 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10059 spin_lock_irqsave(&ioc->sas_node_lock, flags);
/* Match on SAS address + hba_port. */
10060 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10061 if (sas_expander->sas_address != sas_address)
10063 if (sas_expander->port != port)
10065 sas_expander->responding = 1;
10067 if (enclosure_dev) {
10068 sas_expander->enclosure_logical_id =
10069 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10070 sas_expander->enclosure_handle =
10071 le16_to_cpu(expander_pg0->EnclosureHandle);
/* Handle unchanged: done; otherwise update expander + per-phy handles. */
10074 if (sas_expander->handle == handle)
10076 pr_info("\texpander(0x%016llx): handle changed" \
10077 " from(0x%04x) to (0x%04x)!!!\n",
10078 (unsigned long long)sas_expander->sas_address,
10079 sas_expander->handle, handle);
10080 sas_expander->handle = handle;
10081 for (i = 0 ; i < sas_expander->num_phys ; i++)
10082 sas_expander->phy[i].handle = handle;
10086 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10090 * _scsih_search_responding_expanders - mark still-responding expanders
10091 * @ioc: per adapter object
10093 * After host reset, find out whether devices are still responding.
/*
 * After host reset: iterate Expander Page 0 with GET_NEXT_HNDL, log each
 * expander found and mark the matching cached node responding (see
 * _scsih_mark_responding_expander).
 * NOTE(review): this extract is missing intervening source lines.
 */
10097 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10099 Mpi2ExpanderPage0_t expander_pg0;
10100 Mpi2ConfigReply_t mpi_reply;
10106 ioc_info(ioc, "search for expanders: start\n");
/* Nothing to match against if no expanders were previously known. */
10108 if (list_empty(&ioc->sas_expander_list))
10112 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10113 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10115 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10116 MPI2_IOCSTATUS_MASK;
10117 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10120 handle = le16_to_cpu(expander_pg0.DevHandle);
10121 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10122 port = expander_pg0.PhysicalPort;
/* Log port only when multipath is enabled; else a sentinel port id. */
10124 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10125 handle, (unsigned long long)sas_address,
10126 (ioc->multipath_on_hba ?
10127 port : MULTIPATH_DISABLED_PORT_ID));
10128 _scsih_mark_responding_expander(ioc, &expander_pg0);
10132 ioc_info(ioc, "search for expanders: complete\n");
10136 * _scsih_remove_unresponding_devices - removing unresponding devices
10137 * @ioc: per adapter object
/*
 * After the responding-device searches have run: tear down every device that
 * was NOT marked responding — SAS end-devices, PCIe devices, RAID volumes and
 * expanders — then clear the responding flags for the next reset cycle and
 * unblock I/O on the survivors. List entries are moved to a private list
 * under the lock, then removed outside it.
 * NOTE(review): this extract is missing intervening source lines (braces,
 * 'else' lines, the 'head' declaration); comments describe only what is
 * visible here.
 */
10140 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10142 struct _sas_device *sas_device, *sas_device_next;
10143 struct _sas_node *sas_expander, *sas_expander_next;
10144 struct _raid_device *raid_device, *raid_device_next;
10145 struct _pcie_device *pcie_device, *pcie_device_next;
10146 struct list_head tmp_list;
10147 unsigned long flags;
10150 ioc_info(ioc, "removing unresponding devices: start\n");
10152 /* removing unresponding end devices */
10153 ioc_info(ioc, "removing unresponding devices: end-devices\n");
10155 * Iterate, pulling off devices marked as non-responding. We become the
10156 * owner for the reference the list had on any object we prune.
10158 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10161 * Clean up the sas_device_init_list list as
10162 * driver goes for fresh scan as part of diag reset.
10164 list_for_each_entry_safe(sas_device, sas_device_next,
10165 &ioc->sas_device_init_list, list) {
10166 list_del_init(&sas_device->list);
10167 sas_device_put(sas_device);
/* Move unresponsive devices to the private list; reset the flag on the
 * rest so the next reset cycle starts clean. */
10170 list_for_each_entry_safe(sas_device, sas_device_next,
10171 &ioc->sas_device_list, list) {
10172 if (!sas_device->responding)
10173 list_move_tail(&sas_device->list, &head);
10175 sas_device->responding = 0;
10177 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10180 * Now, uninitialize and remove the unresponding devices we pruned.
10182 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10183 _scsih_remove_device(ioc, sas_device);
10184 list_del_init(&sas_device->list);
10185 sas_device_put(sas_device);
/* Same pattern for PCIe (NVMe) devices. */
10188 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
10189 INIT_LIST_HEAD(&head);
10190 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10192 * Clean up the pcie_device_init_list list as
10193 * driver goes for fresh scan as part of diag reset.
10195 list_for_each_entry_safe(pcie_device, pcie_device_next,
10196 &ioc->pcie_device_init_list, list) {
10197 list_del_init(&pcie_device->list);
10198 pcie_device_put(pcie_device);
10201 list_for_each_entry_safe(pcie_device, pcie_device_next,
10202 &ioc->pcie_device_list, list) {
10203 if (!pcie_device->responding)
10204 list_move_tail(&pcie_device->list, &head);
10206 pcie_device->responding = 0;
10208 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10210 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10211 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10212 list_del_init(&pcie_device->list);
10213 pcie_device_put(pcie_device);
10216 /* removing unresponding volumes */
10217 if (ioc->ir_firmware) {
10218 ioc_info(ioc, "removing unresponding devices: volumes\n");
10219 list_for_each_entry_safe(raid_device, raid_device_next,
10220 &ioc->raid_device_list, list) {
10221 if (!raid_device->responding)
10222 _scsih_sas_volume_delete(ioc,
10223 raid_device->handle);
10225 raid_device->responding = 0;
10229 /* removing unresponding expanders */
10230 ioc_info(ioc, "removing unresponding devices: expanders\n");
10231 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10232 INIT_LIST_HEAD(&tmp_list);
10233 list_for_each_entry_safe(sas_expander, sas_expander_next,
10234 &ioc->sas_expander_list, list) {
10235 if (!sas_expander->responding)
10236 list_move_tail(&sas_expander->list, &tmp_list);
10238 sas_expander->responding = 0;
10240 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10241 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10243 _scsih_expander_node_remove(ioc, sas_expander);
10246 ioc_info(ioc, "removing unresponding devices: complete\n");
10248 /* unblock devices */
10249 _scsih_ublock_io_all_device(ioc);
/*
 * Re-read Expander Page 1 for each phy of @sas_expander (addressed by
 * firmware @handle) and push the attached-device handle and negotiated link
 * rate into the SAS transport layer. Used after reset when links may have
 * been renegotiated.
 * NOTE(review): this extract is missing intervening source lines.
 */
10253 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10254 struct _sas_node *sas_expander, u16 handle)
10256 Mpi2ExpanderPage1_t expander_pg1;
10257 Mpi2ConfigReply_t mpi_reply;
10260 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10261 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10262 &expander_pg1, i, handle))) {
10263 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10264 __FILE__, __LINE__, __func__);
/* NegotiatedLinkRate: logical rate lives in the high nibble. */
10268 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10269 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10270 expander_pg1.NegotiatedLinkRate >> 4,
10271 sas_expander->port);
10276 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10277 * @ioc: per adapter object
10280 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10282 Mpi2ExpanderPage0_t expander_pg0;
10283 Mpi2SasDevicePage0_t sas_device_pg0;
10284 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10285 Mpi2RaidVolPage1_t *volume_pg1;
10286 Mpi2RaidVolPage0_t *volume_pg0;
10287 Mpi2RaidPhysDiskPage0_t pd_pg0;
10288 Mpi2EventIrConfigElement_t element;
10289 Mpi2ConfigReply_t mpi_reply;
10290 u8 phys_disk_num, port_id;
10292 u16 handle, parent_handle;
10294 struct _sas_device *sas_device;
10295 struct _pcie_device *pcie_device;
10296 struct _sas_node *expander_device;
10297 static struct _raid_device *raid_device;
10299 unsigned long flags;
10301 volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
10305 volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
10311 ioc_info(ioc, "scan devices: start\n");
10313 _scsih_sas_host_refresh(ioc);
10315 ioc_info(ioc, "\tscan devices: expanders start\n");
10319 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10320 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10321 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10322 MPI2_IOCSTATUS_MASK;
10323 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10324 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10325 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10328 handle = le16_to_cpu(expander_pg0.DevHandle);
10329 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10330 port_id = expander_pg0.PhysicalPort;
10331 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10332 ioc, le64_to_cpu(expander_pg0.SASAddress),
10333 mpt3sas_get_port_by_id(ioc, port_id, 0));
10334 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10335 if (expander_device)
10336 _scsih_refresh_expander_links(ioc, expander_device,
10339 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10341 (u64)le64_to_cpu(expander_pg0.SASAddress));
10342 _scsih_expander_add(ioc, handle);
10343 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10345 (u64)le64_to_cpu(expander_pg0.SASAddress));
10349 ioc_info(ioc, "\tscan devices: expanders complete\n");
10351 if (!ioc->ir_firmware)
10354 ioc_info(ioc, "\tscan devices: phys disk start\n");
10357 phys_disk_num = 0xFF;
10358 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10359 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10361 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10362 MPI2_IOCSTATUS_MASK;
10363 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10364 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10365 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10368 phys_disk_num = pd_pg0.PhysDiskNum;
10369 handle = le16_to_cpu(pd_pg0.DevHandle);
10370 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10372 sas_device_put(sas_device);
10375 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10376 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10379 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10380 MPI2_IOCSTATUS_MASK;
10381 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10382 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10383 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10386 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10387 if (!_scsih_get_sas_address(ioc, parent_handle,
10389 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10391 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10392 port_id = sas_device_pg0.PhysicalPort;
10393 mpt3sas_transport_update_links(ioc, sas_address,
10394 handle, sas_device_pg0.PhyNum,
10395 MPI2_SAS_NEG_LINK_RATE_1_5,
10396 mpt3sas_get_port_by_id(ioc, port_id, 0));
10397 set_bit(handle, ioc->pd_handles);
10399 /* This will retry adding the end device.
10400 * _scsih_add_device() will decide on retries and
10401 * return "1" when it should be retried
10403 while (_scsih_add_device(ioc, handle, retry_count++,
10407 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10409 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10413 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10415 ioc_info(ioc, "\tscan devices: volumes start\n");
10419 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10420 volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10421 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10422 MPI2_IOCSTATUS_MASK;
10423 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10424 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10425 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10428 handle = le16_to_cpu(volume_pg1->DevHandle);
10429 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10430 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10431 le64_to_cpu(volume_pg1->WWID));
10432 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10435 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10436 volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10437 sizeof(Mpi2RaidVolPage0_t)))
10439 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10440 MPI2_IOCSTATUS_MASK;
10441 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10442 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10443 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10446 if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10447 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10448 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10449 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10450 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10451 element.VolDevHandle = volume_pg1->DevHandle;
10452 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10453 volume_pg1->DevHandle);
10454 _scsih_sas_volume_add(ioc, &element);
10455 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10456 volume_pg1->DevHandle);
10460 ioc_info(ioc, "\tscan devices: volumes complete\n");
10464 ioc_info(ioc, "\tscan devices: end devices start\n");
10468 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10469 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10471 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10472 MPI2_IOCSTATUS_MASK;
10473 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10474 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10475 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10478 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10479 if (!(_scsih_is_end_device(
10480 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10482 port_id = sas_device_pg0.PhysicalPort;
10483 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10484 le64_to_cpu(sas_device_pg0.SASAddress),
10485 mpt3sas_get_port_by_id(ioc, port_id, 0));
10487 sas_device_put(sas_device);
10490 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10491 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10492 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10494 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10495 mpt3sas_transport_update_links(ioc, sas_address, handle,
10496 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10497 mpt3sas_get_port_by_id(ioc, port_id, 0));
10499 /* This will retry adding the end device.
10500 * _scsih_add_device() will decide on retries and
10501 * return "1" when it should be retried
10503 while (_scsih_add_device(ioc, handle, retry_count++,
10507 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10509 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10512 ioc_info(ioc, "\tscan devices: end devices complete\n");
10513 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10517 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10518 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10520 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10521 & MPI2_IOCSTATUS_MASK;
10522 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10523 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10524 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10527 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10528 if (!(_scsih_is_nvme_pciescsi_device(
10529 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10531 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10532 le64_to_cpu(pcie_device_pg0.WWID));
10534 pcie_device_put(pcie_device);
10538 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10539 _scsih_pcie_add_device(ioc, handle);
10541 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10542 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10548 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10549 ioc_info(ioc, "scan devices: complete\n");
10553  * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
10554  * @ioc: per adapter object
10556  * The handler for doing any required cleanup or initialization.
10558 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
/* Pre-reset stage only emits a debug trace; the visible code performs no
 * cleanup here — outstanding-command cleanup is done by a separate callback.
 */
10560 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10564  * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10566  * @ioc: per adapter object
10568  * The handler for doing any required cleanup or initialization.
10571 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10574 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
/* Abort an in-flight scsih driver command: flag it as interrupted by reset,
 * release its message frame (smid) and wake any waiter blocked on .done.
 */
10575 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10576 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10577 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10578 complete(&ioc->scsih_cmds.done);
/* Same treatment for an in-flight task-management command. */
10580 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10581 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10582 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10583 complete(&ioc->tm_cmds.done);
/* Reset per-device bookkeeping bitmaps (pending OS device-add and
 * device-remove-in-progress), then flush the firmware event queue and any
 * running SCSI commands so nothing stale survives the reset.
 */
10586 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10587 memset(ioc->device_remove_in_progress, 0,
10588 ioc->device_remove_in_progress_sz);
10589 _scsih_fw_event_cleanup_queue(ioc);
10590 _scsih_flush_running_cmds(ioc);
10594  * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10595  * @ioc: per adapter object
10597  * The handler for doing any required cleanup or initialization.
10600 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10602 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
/* Skip the rescan entirely when discovery is disabled via module parameter
 * and the HBA has no phys recorded; otherwise re-validate everything the
 * driver knew about before the reset.
 */
10603 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
/* With multipath enabled on the HBA, ports and virtual phys must be
 * refreshed first since their mapping may change across a reset.
 */
10604 if (ioc->multipath_on_hba) {
10605 _scsih_sas_port_refresh(ioc);
10606 _scsih_update_vphys_after_reset(ioc);
/* Re-probe each device class (SAS, PCIe/NVMe, RAID, expanders) for
 * responsiveness, then delete whatever did not respond.
 */
10608 _scsih_prep_device_scan(ioc);
10609 _scsih_create_enclosure_list_after_reset(ioc);
10610 _scsih_search_responding_sas_devices(ioc);
10611 _scsih_search_responding_pcie_devices(ioc);
10612 _scsih_search_responding_raid_devices(ioc);
10613 _scsih_search_responding_expanders(ioc);
10614 _scsih_error_recovery_delete_devices(ioc);
10619  * _mpt3sas_fw_work - delayed task for processing firmware events
10620  * @ioc: per adapter object
10621  * @fw_event: The fw_event_work object
10625 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
/* Worker-thread context. Marks this event as the one currently being
 * processed and removes it from the pending list before dispatching.
 */
10627 ioc->current_event = fw_event;
10628 _scsih_fw_event_del_from_list(ioc, fw_event);
10630 /* the queue is being flushed so ignore this event */
10631 if (ioc->remove_host || ioc->pci_error_recovery) {
10632 fw_event_work_put(fw_event);
10633 ioc->current_event = NULL;
/* Dispatch on the event code; each case delegates to a dedicated handler.
 * NOTE(review): per-case terminators (break) are not visible in this
 * listing — confirm against the full file that no case falls through.
 */
10637 switch (fw_event->event) {
10638 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10639 mpt3sas_process_trigger_data(ioc,
10640 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10641 fw_event->event_data);
10643 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
/* Wait out any ongoing host recovery before pruning devices. */
10644 while (scsi_host_in_recovery(ioc->shost) ||
10645 ioc->shost_recovery) {
10647 * If we're unloading or cancelling the work, bail.
10648 * Otherwise, this can become an infinite loop.
10650 if (ioc->remove_host || ioc->fw_events_cleanup)
10654 _scsih_remove_unresponding_devices(ioc);
10655 _scsih_del_dirty_vphy(ioc);
10656 _scsih_del_dirty_port_entries(ioc);
10657 _scsih_scan_for_devices_after_reset(ioc);
10659 * If diag reset has occurred during the driver load
10660 * then driver has to complete the driver load operation
10661 * by executing the following items:
10662 *- Register the devices from sas_device_init_list to SML
10663 *- clear is_driver_loading flag,
10664 *- start the watchdog thread.
10665 * In happy driver load path, above things are taken care of when
10666 * driver executes scsih_scan_finished().
10668 if (ioc->is_driver_loading)
10669 _scsih_complete_devices_scanning(ioc);
10670 _scsih_set_nvme_max_shutdown_latency(ioc);
10672 case MPT3SAS_PORT_ENABLE_COMPLETE:
10673 ioc->start_scan = 0;
/* Apply the missing_delay module parameters only when both were set. */
10674 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10675 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10678 ioc_info(ioc, "port enable: complete from worker thread\n"));
10680 case MPT3SAS_TURN_ON_PFA_LED:
10681 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10683 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10684 _scsih_sas_topology_change_event(ioc, fw_event);
10686 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
/* Debug-only path: status-change payload is merely logged here. */
10687 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10688 _scsih_sas_device_status_change_event_debug(ioc,
10689 (Mpi2EventDataSasDeviceStatusChange_t *)
10690 fw_event->event_data);
10692 case MPI2_EVENT_SAS_DISCOVERY:
10693 _scsih_sas_discovery_event(ioc, fw_event);
10695 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10696 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10698 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10699 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10701 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10702 _scsih_sas_enclosure_dev_status_change_event(ioc,
10705 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10706 _scsih_sas_ir_config_change_event(ioc, fw_event);
10708 case MPI2_EVENT_IR_VOLUME:
10709 _scsih_sas_ir_volume_event(ioc, fw_event);
10711 case MPI2_EVENT_IR_PHYSICAL_DISK:
10712 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10714 case MPI2_EVENT_IR_OPERATION_STATUS:
10715 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10717 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10718 _scsih_pcie_device_status_change_event(ioc, fw_event);
10720 case MPI2_EVENT_PCIE_ENUMERATION:
10721 _scsih_pcie_enumeration_event(ioc, fw_event);
10723 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10724 _scsih_pcie_topology_change_event(ioc, fw_event);
10725 ioc->current_event = NULL;
/* Drop the work item's reference and clear the current-event marker. */
10730 fw_event_work_put(fw_event);
10731 ioc->current_event = NULL;
10735  * _firmware_event_work
10736  * @work: The fw_event_work object
10739  * wrappers for the work thread handling firmware events
10743 _firmware_event_work(struct work_struct *work)
/* Thin workqueue adapter: recover the enclosing fw_event_work from the
 * embedded work_struct and forward it to the real handler.
 */
10745 struct fw_event_work *fw_event = container_of(work,
10746 struct fw_event_work, work);
10748 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10752  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10753  * @ioc: per adapter object
10754  * @msix_index: MSIX table index supplied by the OS
10755  * @reply: reply message frame(lower 32bit addr)
10756  * Context: interrupt.
10758  * This function merely adds a new work task into ioc->firmware_event_thread.
10759  * The tasks are worked from _firmware_event_work in user context.
10761  * Return: 1 meaning mf should be freed from _base_interrupt
10762  * 0 means the mf is freed from this function.
10765 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10768 struct fw_event_work *fw_event;
10769 Mpi2EventNotificationReply_t *mpi_reply;
10772 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10774 /* events turned off due to host reset */
10775 if (ioc->pci_error_recovery)
/* Translate the reply address into a virtual pointer; a NULL result means
 * the reply frame is invalid and the event must be dropped.
 */
10778 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10780 if (unlikely(!mpi_reply)) {
10781 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10782 __FILE__, __LINE__, __func__);
10786 event = le16_to_cpu(mpi_reply->Event);
/* Feed the event to the diag-trigger machinery (log entries excluded). */
10788 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10789 mpt3sas_trigger_event(ioc, event, 0);
/* Pre-filter in interrupt context: some events are handled (or discarded)
 * right here; the rest fall through to be queued for the worker thread.
 * NOTE(review): break/return terminators are elided in this listing —
 * verify case boundaries against the full file.
 */
10793 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10795 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10796 (Mpi2EventDataSasBroadcastPrimitive_t *)
10797 mpi_reply->EventData;
10799 if (baen_data->Primitive !=
10800 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
/* Coalesce broadcast AENs: if one is already being serviced, just count
 * the new arrival instead of queueing another work item.
 */
10803 if (ioc->broadcast_aen_busy) {
10804 ioc->broadcast_aen_pending++;
10807 ioc->broadcast_aen_busy = 1;
10811 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10812 _scsih_check_topo_delete_events(ioc,
10813 (Mpi2EventDataSasTopologyChangeList_t *)
10814 mpi_reply->EventData);
10816 * No need to add the topology change list
10817 * event to fw event work queue when
10818 * diag reset is going on. Since during diag
10819 * reset driver scan the devices by reading
10820 * sas device page0's not by processing the
10823 if (ioc->shost_recovery)
10826 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10827 _scsih_check_pcie_topo_remove_events(ioc,
10828 (Mpi26EventDataPCIeTopologyChangeList_t *)
10829 mpi_reply->EventData);
/* As with SAS topology changes, skip queueing during host recovery. */
10830 if (ioc->shost_recovery)
10833 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10834 _scsih_check_ir_config_unhide_events(ioc,
10835 (Mpi2EventDataIrConfigChangeList_t *)
10836 mpi_reply->EventData);
10838 case MPI2_EVENT_IR_VOLUME:
10839 _scsih_check_volume_delete_events(ioc,
10840 (Mpi2EventDataIrVolume_t *)
10841 mpi_reply->EventData);
10843 case MPI2_EVENT_LOG_ENTRY_ADDED:
10845 Mpi2EventDataLogEntryAdded_t *log_entry;
/* Log entries are only interpreted on WarpDrive controllers. */
10848 if (!ioc->is_warpdrive)
10851 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10852 mpi_reply->EventData;
10853 log_code = (u32 *)log_entry->LogData;
10855 if (le16_to_cpu(log_entry->LogEntryQualifier)
10856 != MPT2_WARPDRIVE_LOGENTRY)
/* Map WarpDrive log codes to human-readable warnings/errors. */
10859 switch (le32_to_cpu(*log_code)) {
10860 case MPT2_WARPDRIVE_LC_SSDT:
10861 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10863 case MPT2_WARPDRIVE_LC_SSDLW:
10864 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10866 case MPT2_WARPDRIVE_LC_SSDLF:
10867 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10869 case MPT2_WARPDRIVE_LC_BRMF:
10870 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10876 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10877 _scsih_sas_device_status_change_event(ioc,
10878 (Mpi2EventDataSasDeviceStatusChange_t *)
10879 mpi_reply->EventData);
/* These events need no interrupt-time preprocessing; they are simply
 * queued for the worker thread below.
 */
10881 case MPI2_EVENT_IR_OPERATION_STATUS:
10882 case MPI2_EVENT_SAS_DISCOVERY:
10883 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10884 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10885 case MPI2_EVENT_IR_PHYSICAL_DISK:
10886 case MPI2_EVENT_PCIE_ENUMERATION:
10887 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10890 case MPI2_EVENT_TEMP_THRESHOLD:
10891 _scsih_temp_threshold_events(ioc,
10892 (Mpi2EventDataTemperature_t *)
10893 mpi_reply->EventData);
10895 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10896 ActiveCableEventData =
10897 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10898 switch (ActiveCableEventData->ReasonCode) {
10899 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10900 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10901 ActiveCableEventData->ReceptacleID);
10902 pr_notice("cannot be powered and devices connected\n");
10903 pr_notice("to this active cable will not be seen\n");
10904 pr_notice("This active cable requires %d mW of power\n",
10906 ActiveCableEventData->ActiveCablePowerRequirement));
10909 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10910 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10911 ActiveCableEventData->ReceptacleID);
10913 "is not running at optimal speed(12 Gb/s rate)\n");
10919 default: /* ignore the rest */
/* Package the surviving event (EventDataLength is in 32-bit words, hence
 * the *4) into a fw_event_work item and hand it to the worker thread.
 */
10923 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
10924 fw_event = alloc_fw_event_work(sz);
10926 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10927 __FILE__, __LINE__, __func__);
10931 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
10932 fw_event->ioc = ioc;
10933 fw_event->VF_ID = mpi_reply->VF_ID;
10934 fw_event->VP_ID = mpi_reply->VP_ID;
10935 fw_event->event = event;
10936 _scsih_fw_event_add(ioc, fw_event);
10937 fw_event_work_put(fw_event);
10942  * _scsih_expander_node_remove - removing expander device from list.
10943  * @ioc: per adapter object
10944  * @sas_expander: the sas_device object
10946  * Removing object and freeing associated memory from the
10947  * ioc->sas_expander_list.
10950 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
10951 struct _sas_node *sas_expander)
10953 struct _sas_port *mpt3sas_port, *next;
10954 unsigned long flags;
10956 /* remove sibling ports attached to this expander */
10957 list_for_each_entry_safe(mpt3sas_port, next,
10958 &sas_expander->sas_port_list, port_list) {
/* Bail out of the teardown while the host is mid-recovery. */
10959 if (ioc->shost_recovery)
/* End devices are removed directly by SAS address; child expanders are
 * removed recursively via mpt3sas_expander_remove().
 */
10961 if (mpt3sas_port->remote_identify.device_type ==
10963 mpt3sas_device_remove_by_sas_address(ioc,
10964 mpt3sas_port->remote_identify.sas_address,
10965 mpt3sas_port->hba_port);
10966 else if (mpt3sas_port->remote_identify.device_type ==
10967 SAS_EDGE_EXPANDER_DEVICE ||
10968 mpt3sas_port->remote_identify.device_type ==
10969 SAS_FANOUT_EXPANDER_DEVICE)
10970 mpt3sas_expander_remove(ioc,
10971 mpt3sas_port->remote_identify.sas_address,
10972 mpt3sas_port->hba_port);
/* Detach this expander's own transport port, then unlink and free it. */
10975 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
10976 sas_expander->sas_address_parent, sas_expander->port);
10979 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10980 sas_expander->handle, (unsigned long long)
10981 sas_expander->sas_address,
10982 sas_expander->port->port_id);
10984 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10985 list_del(&sas_expander->list);
10986 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10988 kfree(sas_expander->phy);
10989 kfree(sas_expander);
10993  * _scsih_nvme_shutdown - NVMe shutdown notification
10994  * @ioc: per adapter object
10996  * Sending IoUnitControl request with shutdown operation code to alert IOC that
10997  * the host system is shutting down so that IOC can issue NVMe shutdown to
10998  * NVMe drives attached to it.
11001 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
11003 Mpi26IoUnitControlRequest_t *mpi_request;
11004 Mpi26IoUnitControlReply_t *mpi_reply;
11007 /* are there any NVMe devices ? */
11008 if (list_empty(&ioc->pcie_device_list))
/* Serialize use of the shared scsih_cmds command slot. */
11011 mutex_lock(&ioc->scsih_cmds.mutex);
11013 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11014 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11018 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11020 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11023 "%s: failed obtaining a smid\n", __func__);
11024 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
/* Build a zeroed IO Unit Control request carrying the SHUTDOWN opcode. */
11028 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11029 ioc->scsih_cmds.smid = smid;
11030 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
11031 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
11032 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
11034 init_completion(&ioc->scsih_cmds.done);
11035 ioc->put_smid_default(ioc, smid);
11036 /* Wait for max_shutdown_latency seconds */
11038 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
11039 ioc->max_shutdown_latency);
11040 wait_for_completion_timeout(&ioc->scsih_cmds.done,
11041 ioc->max_shutdown_latency*HZ);
11043 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11044 ioc_err(ioc, "%s: timeout\n", __func__);
/* On success with a valid reply, log the IOC's completion status. */
11048 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11049 mpi_reply = ioc->scsih_cmds.reply;
11050 ioc_info(ioc, "Io Unit Control shutdown (complete):"
11051 "ioc_status(0x%04x), loginfo(0x%08x)\n",
11052 le16_to_cpu(mpi_reply->IOCStatus),
11053 le32_to_cpu(mpi_reply->IOCLogInfo));
/* Release the command slot and the mutex regardless of outcome. */
11056 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11057 mutex_unlock(&ioc->scsih_cmds.mutex);
11062  * _scsih_ir_shutdown - IR shutdown notification
11063  * @ioc: per adapter object
11065  * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
11066  * the host system is shutting down.
11069 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
11071 Mpi2RaidActionRequest_t *mpi_request;
11072 Mpi2RaidActionReply_t *mpi_reply;
11075 /* is IR firmware build loaded ? */
11076 if (!ioc->ir_firmware)
11079 /* are there any volumes ? */
11080 if (list_empty(&ioc->raid_device_list))
/* Serialize use of the shared scsih_cmds command slot. */
11083 mutex_lock(&ioc->scsih_cmds.mutex);
11085 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11086 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11089 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11091 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11093 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
11094 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
/* Build a zeroed RAID Action request announcing system shutdown. */
11098 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11099 ioc->scsih_cmds.smid = smid;
11100 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11102 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11103 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
/* hide_ir_msg suppresses IR chatter (e.g. on WarpDrive-style setups). */
11105 if (!ioc->hide_ir_msg)
11106 ioc_info(ioc, "IR shutdown (sending)\n");
11107 init_completion(&ioc->scsih_cmds.done);
11108 ioc->put_smid_default(ioc, smid);
/* Fixed 10-second wait for the firmware to acknowledge. */
11109 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11111 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11112 ioc_err(ioc, "%s: timeout\n", __func__);
11116 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11117 mpi_reply = ioc->scsih_cmds.reply;
11118 if (!ioc->hide_ir_msg)
11119 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11120 le16_to_cpu(mpi_reply->IOCStatus),
11121 le32_to_cpu(mpi_reply->IOCLogInfo));
/* Release the command slot and the mutex regardless of outcome. */
11125 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11126 mutex_unlock(&ioc->scsih_cmds.mutex);
11130  * _scsih_get_shost_and_ioc - get shost and ioc
11131  * and verify whether they are NULL or not
11132  * @pdev: PCI device struct
11133  * @shost: address of scsi host pointer
11134  * @ioc: address of HBA adapter pointer
11136  * Return zero if *shost and *ioc are not NULL otherwise return error number.
11139 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11140 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
/* Recover the Scsi_Host stashed in the PCI device's driver data, then the
 * adapter object stored as the host's private data; either being NULL is
 * reported and treated as an error.
 */
11142 *shost = pci_get_drvdata(pdev);
11143 if (*shost == NULL) {
11144 dev_err(&pdev->dev, "pdev's driver data is null\n");
11148 *ioc = shost_priv(*shost);
11149 if (*ioc == NULL) {
11150 dev_err(&pdev->dev, "shost's private data is null\n");
11158  * scsih_remove - detach and remove add host
11159  * @pdev: PCI device struct
11161  * Routine called when unloading the driver.
11163 static void scsih_remove(struct pci_dev *pdev)
11165 struct Scsi_Host *shost;
11166 struct MPT3SAS_ADAPTER *ioc;
11167 struct _sas_port *mpt3sas_port, *next_port;
11168 struct _raid_device *raid_device, *next;
11169 struct MPT3SAS_TARGET *sas_target_priv_data;
11170 struct _pcie_device *pcie_device, *pcienext;
11171 struct workqueue_struct *wq;
11172 unsigned long flags;
11173 Mpi2ConfigReply_t mpi_reply;
11174 struct hba_port *port, *port_next;
11176 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11179 ioc->remove_host = 1;
/* If the device has been surprise-removed, fail outstanding commands
 * immediately instead of waiting on hardware that is gone.
 */
11181 if (!pci_device_is_present(pdev))
11182 _scsih_flush_running_cmds(ioc);
11184 _scsih_fw_event_cleanup_queue(ioc);
/* Detach the firmware-event workqueue under the lock, then destroy it
 * outside so no new work can be queued against a dead pointer.
 */
11186 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11187 wq = ioc->firmware_event_thread;
11188 ioc->firmware_event_thread = NULL;
11189 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11191 destroy_workqueue(wq);
11193 * Copy back the unmodified ioc page1. so that on next driver load,
11194 * current modified changes on ioc page1 won't take effect.
11196 if (ioc->is_aero_ioc)
11197 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11198 &ioc->ioc_pg1_copy);
11199 /* release all the volumes */
11200 _scsih_ir_shutdown(ioc);
11201 mpt3sas_destroy_debugfs(ioc);
11202 sas_remove_host(shost);
/* Tear down RAID volumes: mark each target deleted before removing it
 * from the SCSI midlayer, then drop the driver's record of it.
 */
11203 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11205 if (raid_device->starget) {
11206 sas_target_priv_data =
11207 raid_device->starget->hostdata;
11208 sas_target_priv_data->deleted = 1;
11209 scsi_remove_target(&raid_device->starget->dev);
11211 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11212 raid_device->handle, (u64)raid_device->wwid);
11213 _scsih_raid_device_remove(ioc, raid_device);
/* Tear down PCIe (NVMe) devices and drop their list references. */
11215 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11217 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11218 list_del_init(&pcie_device->list);
11219 pcie_device_put(pcie_device);
11222 /* free ports attached to the sas_host */
11223 list_for_each_entry_safe(mpt3sas_port, next_port,
11224 &ioc->sas_hba.sas_port_list, port_list) {
11225 if (mpt3sas_port->remote_identify.device_type ==
11227 mpt3sas_device_remove_by_sas_address(ioc,
11228 mpt3sas_port->remote_identify.sas_address,
11229 mpt3sas_port->hba_port);
11230 else if (mpt3sas_port->remote_identify.device_type ==
11231 SAS_EDGE_EXPANDER_DEVICE ||
11232 mpt3sas_port->remote_identify.device_type ==
11233 SAS_FANOUT_EXPANDER_DEVICE)
11234 mpt3sas_expander_remove(ioc,
11235 mpt3sas_port->remote_identify.sas_address,
11236 mpt3sas_port->hba_port);
/* Free the per-HBA port table entries. */
11239 list_for_each_entry_safe(port, port_next,
11240 &ioc->port_table_list, list) {
11241 list_del(&port->list);
11245 /* free phys attached to the sas_host */
11246 if (ioc->sas_hba.num_phys) {
11247 kfree(ioc->sas_hba.phy);
11248 ioc->sas_hba.phy = NULL;
11249 ioc->sas_hba.num_phys = 0;
/* Detach from the base layer, unlink from the global adapter list, and
 * release the final host reference.
 */
11252 mpt3sas_base_detach(ioc);
11253 spin_lock(&gioc_lock);
11254 list_del(&ioc->list);
11255 spin_unlock(&gioc_lock);
11256 scsi_host_put(shost);
11260  * scsih_shutdown - routine call during system shutdown
11261  * @pdev: PCI device struct
11264 scsih_shutdown(struct pci_dev *pdev)
11266 struct Scsi_Host *shost;
11267 struct MPT3SAS_ADAPTER *ioc;
11268 struct workqueue_struct *wq;
11269 unsigned long flags;
11270 Mpi2ConfigReply_t mpi_reply;
11272 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11275 ioc->remove_host = 1;
/* On surprise removal, fail outstanding commands right away. */
11277 if (!pci_device_is_present(pdev))
11278 _scsih_flush_running_cmds(ioc);
11280 _scsih_fw_event_cleanup_queue(ioc);
/* Detach the firmware-event workqueue under the lock, destroy outside. */
11282 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11283 wq = ioc->firmware_event_thread;
11284 ioc->firmware_event_thread = NULL;
11285 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11287 destroy_workqueue(wq);
11289 * Copy back the unmodified ioc page1 so that on next driver load,
11290 * current modified changes on ioc page1 won't take effect.
11292 if (ioc->is_aero_ioc)
11293 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11294 &ioc->ioc_pg1_copy);
/* Notify IR and NVMe subsystems of the shutdown, then detach the base. */
11296 _scsih_ir_shutdown(ioc);
11297 _scsih_nvme_shutdown(ioc);
11298 mpt3sas_base_detach(ioc);
11303  * _scsih_probe_boot_devices - reports 1st device
11304  * @ioc: per adapter object
11306  * If specified in bios page 2, this routine reports the 1st
11307  * device scsi-ml or sas transport for persistent boot device
11308  * purposes. Please refer to function _scsih_determine_boot_device()
11311 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11315 struct _sas_device *sas_device;
11316 struct _raid_device *raid_device;
11317 struct _pcie_device *pcie_device;
11319 u64 sas_address_parent;
11321 unsigned long flags;
11324 struct hba_port *port;
11326 /* no Bios, return immediately */
11327 if (!ioc->bios_pg3.BiosVersion)
/* Pick the boot device by priority: requested, then requested-alternate,
 * then current boot device; the channel tells us which class it is.
 */
11331 if (ioc->req_boot_device.device) {
11332 device = ioc->req_boot_device.device;
11333 channel = ioc->req_boot_device.channel;
11334 } else if (ioc->req_alt_boot_device.device) {
11335 device = ioc->req_alt_boot_device.device;
11336 channel = ioc->req_alt_boot_device.channel;
11337 } else if (ioc->current_boot_device.device) {
11338 device = ioc->current_boot_device.device;
11339 channel = ioc->current_boot_device.channel;
11345 if (channel == RAID_CHANNEL) {
11346 raid_device = device;
11348 * If this boot vd is already registered with SML then
11349 * no need to register it again as part of device scanning
11350 * after diag reset during driver load operation.
11352 if (raid_device->starget)
11354 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11355 raid_device->id, 0);
11357 _scsih_raid_device_remove(ioc, raid_device);
11358 } else if (channel == PCIE_CHANNEL) {
11359 pcie_device = device;
11361 * If this boot NVMe device is already registered with SML then
11362 * no need to register it again as part of device scanning
11363 * after diag reset during driver load operation.
11365 if (pcie_device->starget)
/* Promote the device from the init list to the active list under the
 * lock before registering it with the SCSI midlayer.
 */
11367 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11368 tid = pcie_device->id;
11369 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11370 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11371 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11373 _scsih_pcie_device_remove(ioc, pcie_device);
11375 sas_device = device;
11377 * If this boot sas/sata device is already registered with SML
11378 * then no need to register it again as part of device scanning
11379 * after diag reset during driver load operation.
11381 if (sas_device->starget)
/* Snapshot the identifying fields under the lock and move the device
 * onto the active SAS device list.
 */
11383 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11384 handle = sas_device->handle;
11385 sas_address_parent = sas_device->sas_address_parent;
11386 sas_address = sas_device->sas_address;
11387 port = sas_device->port;
11388 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11389 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* hide_drives mode (e.g. WarpDrive) skips transport registration. */
11391 if (ioc->hide_drives)
11397 if (!mpt3sas_transport_port_add(ioc, handle,
11398 sas_address_parent, port)) {
11399 _scsih_sas_device_remove(ioc, sas_device);
11400 } else if (!sas_device->starget) {
/* Port added but no SCSI target materialized: roll back, except
 * during initial driver load (removal is unsafe while scanning).
 */
11401 if (!ioc->is_driver_loading) {
11402 mpt3sas_transport_port_remove(ioc,
11404 sas_address_parent, port);
11405 _scsih_sas_device_remove(ioc, sas_device);
11412  * _scsih_probe_raid - reporting raid volumes to scsi-ml
11413  * @ioc: per adapter object
11415  * Called during initial loading of the driver.
11418 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11420 struct _raid_device *raid_device, *raid_next;
/* Register every not-yet-registered RAID volume with the SCSI midlayer;
 * on registration failure the volume is dropped from the driver's list.
 */
11423 list_for_each_entry_safe(raid_device, raid_next,
11424 &ioc->raid_device_list, list) {
11425 if (raid_device->starget)
11427 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11428 raid_device->id, 0);
11430 _scsih_raid_device_remove(ioc, raid_device);
/* get_next_sas_device - pop the head of sas_device_init_list.
 * @ioc: per adapter object
 *
 * Takes a reference on the returned device (caller must sas_device_put());
 * returns NULL when the init list is empty.
 */
11434 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11436 struct _sas_device *sas_device = NULL;
11437 unsigned long flags;
11439 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11440 if (!list_empty(&ioc->sas_device_init_list)) {
11441 sas_device = list_first_entry(&ioc->sas_device_init_list,
11442 struct _sas_device, list);
/* Reference for the caller, taken while still under the lock. */
11443 sas_device_get(sas_device);
11445 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* sas_device_make_active - move a SAS device onto the active list.
 * @ioc: per adapter object
 * @sas_device: device to move to ioc->sas_device_list
 */
11450 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11451 struct _sas_device *sas_device)
11453 unsigned long flags;
11455 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11458 * Since we dropped the lock during the call to port_add(), we need to
11459 * be careful here that somebody else didn't move or delete this item
11460 * while we were busy with other things.
11462 * If it was on the list, we need a put() for the reference the list
11463 * had. Either way, we need a get() for the destination list.
11465 if (!list_empty(&sas_device->list)) {
11466 list_del_init(&sas_device->list);
11467 sas_device_put(sas_device);
/* Take the destination list's reference before inserting. */
11470 sas_device_get(sas_device);
11471 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11473 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11477  * _scsih_probe_sas - reporting sas devices to sas transport
11478  * @ioc: per adapter object
11480  * Called during initial loading of the driver.
11483 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11485 struct _sas_device *sas_device;
/* hide_drives mode skips SAS transport registration entirely. */
11487 if (ioc->hide_drives)
/* Drain the init list: register each device's transport port; on failure
 * remove it, otherwise promote it to the active list. Each iteration
 * balances the reference taken by get_next_sas_device().
 */
11490 while ((sas_device = get_next_sas_device(ioc))) {
11491 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11492 sas_device->sas_address_parent, sas_device->port)) {
11493 _scsih_sas_device_remove(ioc, sas_device);
11494 sas_device_put(sas_device);
11496 } else if (!sas_device->starget) {
11498 * When asyn scanning is enabled, its not possible to
11499 * remove devices while scanning is turned on due to an
11500 * oops in scsi_sysfs_add_sdev()->add_device()->
11501 * sysfs_addrm_start()
11503 if (!ioc->is_driver_loading) {
11504 mpt3sas_transport_port_remove(ioc,
11505 sas_device->sas_address,
11506 sas_device->sas_address_parent,
11508 _scsih_sas_device_remove(ioc, sas_device);
11509 sas_device_put(sas_device);
11513 sas_device_make_active(ioc, sas_device);
11514 sas_device_put(sas_device);
11519  * get_next_pcie_device - Get the next pcie device
11520  * @ioc: per adapter object
11522  * Get the next pcie device from pcie_device_init_list list.
11524  * Return: pcie device structure if pcie_device_init_list list is not empty
11525  * otherwise returns NULL
11527 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11529 struct _pcie_device *pcie_device = NULL;
11530 unsigned long flags;
11532 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11533 if (!list_empty(&ioc->pcie_device_init_list)) {
11534 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11535 struct _pcie_device, list);
/* Reference for the caller (balanced by pcie_device_put()), taken
 * while still holding the lock.
 */
11536 pcie_device_get(pcie_device);
11538 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11540 return pcie_device;
11544 * pcie_device_make_active - Add pcie device to pcie_device_list list
11545 * @ioc: per adapter object
11546 * @pcie_device: pcie device object
11548 * Add the pcie device which has registered with SCSI Transport Later to
11549 * pcie_device_list list
11551 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11552 struct _pcie_device *pcie_device)
11554 unsigned long flags;
11556 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
/*
 * If the device is still linked on a list (normally the init list),
 * unlink it and drop the reference that list held.
 */
11558 if (!list_empty(&pcie_device->list)) {
11559 list_del_init(&pcie_device->list);
11560 pcie_device_put(pcie_device);
/* Take a reference on behalf of pcie_device_list before insertion. */
11562 pcie_device_get(pcie_device);
11563 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11565 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11569 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11570 * @ioc: per adapter object
11572 * Called during initial loading of the driver.
11575 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11577 struct _pcie_device *pcie_device;
11580 /* PCIe Device List */
11581 while ((pcie_device = get_next_pcie_device(ioc))) {
/* Already registered with the SCSI midlayer; just drop the reference. */
11582 if (pcie_device->starget) {
11583 pcie_device_put(pcie_device);
/* Blocked devices are kept on the active list but not exposed to sml. */
11586 if (pcie_device->access_status ==
11587 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11588 pcie_device_make_active(ioc, pcie_device);
11589 pcie_device_put(pcie_device);
11592 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11593 pcie_device->id, 0);
11595 _scsih_pcie_device_remove(ioc, pcie_device);
11596 pcie_device_put(pcie_device);
11598 } else if (!pcie_device->starget) {
11600 * When async scanning is enabled, it's not possible to
11601 * remove devices while scanning is turned on due to an
11602 * oops in scsi_sysfs_add_sdev()->add_device()->
11603 * sysfs_addrm_start()
11605 if (!ioc->is_driver_loading) {
11606 /* TODO-- Need to find out whether this condition will
11609 _scsih_pcie_device_remove(ioc, pcie_device);
11610 pcie_device_put(pcie_device);
/* success: move the device onto the active pcie_device_list */
11614 pcie_device_make_active(ioc, pcie_device);
11615 pcie_device_put(pcie_device);
11620 * _scsih_probe_devices - probing for devices
11621 * @ioc: per adapter object
11623 * Called during initial loading of the driver.
11626 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11628 u16 volume_mapping_flags;
11630 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11631 return; /* return when IOC doesn't support initiator mode */
/* Report any configured boot device(s) before the general lists. */
11633 _scsih_probe_boot_devices(ioc);
/*
 * With IR firmware, the volume mapping mode from IOC page 8 decides
 * whether RAID volumes or bare SAS devices are reported first.
 */
11635 if (ioc->ir_firmware) {
11636 volume_mapping_flags =
11637 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11638 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11639 if (volume_mapping_flags ==
11640 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11641 _scsih_probe_raid(ioc);
11642 _scsih_probe_sas(ioc);
11644 _scsih_probe_sas(ioc);
11645 _scsih_probe_raid(ioc);
11648 _scsih_probe_sas(ioc);
11649 _scsih_probe_pcie(ioc);
11654 * scsih_scan_start - scsi lld callback for .scan_start
11655 * @shost: SCSI host pointer
11657 * The shost has the ability to discover targets on its own instead
11658 * of scanning the entire bus. In our implementation, we will kick off
11659 * firmware discovery.
11662 scsih_scan_start(struct Scsi_Host *shost)
11664 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* Honor the diag_buffer_enable modparam, else fall back to manu page 11. */
11666 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11667 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11668 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11669 mpt3sas_enable_diag_buffer(ioc, 1);
11671 if (disable_discovery > 0)
/* Kick off firmware discovery; scsih_scan_finished() polls for completion. */
11674 ioc->start_scan = 1;
11675 rc = mpt3sas_port_enable(ioc);
11678 ioc_info(ioc, "port enable: FAILED\n");
11682 * _scsih_complete_devices_scanning - add the devices to sml and
11683 * complete ioc initialization.
11684 * @ioc: per adapter object
11688 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
11691 if (ioc->wait_for_discovery_to_complete) {
11692 ioc->wait_for_discovery_to_complete = 0;
11693 _scsih_probe_devices(ioc);
11696 mpt3sas_base_start_watchdog(ioc);
/* From here on the driver is considered fully loaded. */
11697 ioc->is_driver_loading = 0;
11701 * scsih_scan_finished - scsi lld callback for .scan_finished
11702 * @shost: SCSI host pointer
11703 * @time: elapsed time of the scan in jiffies
11705 * This function will be called periodically until it returns 1 with the
11706 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
11707 * we wait for firmware discovery to complete, then return 1.
11710 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11712 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11714 int issue_hard_reset = 0;
11716 if (disable_discovery > 0) {
11717 ioc->is_driver_loading = 0;
11718 ioc->wait_for_discovery_to_complete = 0;
/* Give up if port enable has not completed within 300 seconds. */
11722 if (time >= (300 * HZ)) {
11723 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11724 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11725 ioc->is_driver_loading = 0;
/* Still scanning: check the IOC doorbell state for fault/coredump. */
11729 if (ioc->start_scan) {
11730 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
11731 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
11732 mpt3sas_print_fault_code(ioc, ioc_state &
11733 MPI2_DOORBELL_DATA_MASK);
11734 issue_hard_reset = 1;
11736 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
11737 MPI2_IOC_STATE_COREDUMP) {
11738 mpt3sas_base_coredump_info(ioc, ioc_state &
11739 MPI2_DOORBELL_DATA_MASK);
11740 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
11741 issue_hard_reset = 1;
11747 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
11749 "port enable: aborted due to diag reset\n");
11750 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11753 if (ioc->start_scan_failed) {
11754 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11755 ioc->start_scan_failed);
11756 ioc->is_driver_loading = 0;
11757 ioc->wait_for_discovery_to_complete = 0;
/* Unrecoverable port-enable failure: schedule host removal. */
11758 ioc->remove_host = 1;
11762 ioc_info(ioc, "port enable: SUCCESS\n");
11763 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11764 _scsih_complete_devices_scanning(ioc);
11767 if (issue_hard_reset) {
11768 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11769 if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
11770 ioc->is_driver_loading = 0;
11776 * scsih_map_queues - map reply queues with request queues
11777 * @shost: SCSI host pointer
11779 static int scsih_map_queues(struct Scsi_Host *shost)
11781 struct MPT3SAS_ADAPTER *ioc =
11782 (struct MPT3SAS_ADAPTER *)shost->hostdata;
/* Nothing to map when only one hardware queue was configured. */
11784 if (ioc->shost->nr_hw_queues == 1)
/* high_iops_queues is passed as the MSI-X vector offset for the mapping. */
11787 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
11788 ioc->pdev, ioc->high_iops_queues);
11791 /* shost template for SAS 2.0 HBA devices */
11792 static struct scsi_host_template mpt2sas_driver_template = {
11793 .module = THIS_MODULE,
11794 .name = "Fusion MPT SAS Host",
11795 .proc_name = MPT2SAS_DRIVER_NAME,
11796 .queuecommand = scsih_qcmd,
11797 .target_alloc = scsih_target_alloc,
11798 .slave_alloc = scsih_slave_alloc,
11799 .slave_configure = scsih_slave_configure,
11800 .target_destroy = scsih_target_destroy,
11801 .slave_destroy = scsih_slave_destroy,
11802 .scan_finished = scsih_scan_finished,
11803 .scan_start = scsih_scan_start,
11804 .change_queue_depth = scsih_change_queue_depth,
11805 .eh_abort_handler = scsih_abort,
11806 .eh_device_reset_handler = scsih_dev_reset,
11807 .eh_target_reset_handler = scsih_target_reset,
11808 .eh_host_reset_handler = scsih_host_reset,
11809 .bios_param = scsih_bios_param,
11812 .sg_tablesize = MPT2SAS_SG_DEPTH,
/* 32767 is also the upper clamp applied to the max_sectors modparam */
11813 .max_sectors = 32767,
11815 .shost_attrs = mpt3sas_host_attrs,
11816 .sdev_attrs = mpt3sas_dev_attrs,
11817 .track_queue_depth = 1,
11818 .cmd_size = sizeof(struct scsiio_tracker),
11821 /* raid transport support for SAS 2.0 HBA devices */
11822 static struct raid_function_template mpt2sas_raid_functions = {
/* cookie ties these RAID transport callbacks to the SAS 2.0 template */
11823 .cookie = &mpt2sas_driver_template,
11824 .is_raid = scsih_is_raid,
11825 .get_resync = scsih_get_resync,
11826 .get_state = scsih_get_state,
11829 /* shost template for SAS 3.0 HBA devices */
11830 static struct scsi_host_template mpt3sas_driver_template = {
11831 .module = THIS_MODULE,
11832 .name = "Fusion MPT SAS Host",
11833 .proc_name = MPT3SAS_DRIVER_NAME,
11834 .queuecommand = scsih_qcmd,
11835 .target_alloc = scsih_target_alloc,
11836 .slave_alloc = scsih_slave_alloc,
11837 .slave_configure = scsih_slave_configure,
11838 .target_destroy = scsih_target_destroy,
11839 .slave_destroy = scsih_slave_destroy,
11840 .scan_finished = scsih_scan_finished,
11841 .scan_start = scsih_scan_start,
11842 .change_queue_depth = scsih_change_queue_depth,
11843 .eh_abort_handler = scsih_abort,
11844 .eh_device_reset_handler = scsih_dev_reset,
11845 .eh_target_reset_handler = scsih_target_reset,
11846 .eh_host_reset_handler = scsih_host_reset,
11847 .bios_param = scsih_bios_param,
11850 .sg_tablesize = MPT3SAS_SG_DEPTH,
11851 .max_sectors = 32767,
11852 .max_segment_size = 0xffffffff,
11854 .shost_attrs = mpt3sas_host_attrs,
11855 .sdev_attrs = mpt3sas_dev_attrs,
11856 .track_queue_depth = 1,
11857 .cmd_size = sizeof(struct scsiio_tracker),
/* multiqueue support; only wired up for the SAS 3.0 template */
11858 .map_queues = scsih_map_queues,
11861 /* raid transport support for SAS 3.0 HBA devices */
11862 static struct raid_function_template mpt3sas_raid_functions = {
/* cookie ties these RAID transport callbacks to the SAS 3.0 template */
11863 .cookie = &mpt3sas_driver_template,
11864 .is_raid = scsih_is_raid,
11865 .get_resync = scsih_get_resync,
11866 .get_state = scsih_get_state,
11870 * _scsih_determine_hba_mpi_version - determine in which MPI version class
11871 * this device belongs to.
11872 * @pdev: PCI device struct
11874 * return MPI2_VERSION for SAS 2.0 HBA devices,
11875 * MPI25_VERSION for SAS 3.0 HBA devices, and
11876 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
11879 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
11882 switch (pdev->device) {
/* SAS 2.0 generation: SSS6200 (WarpDrive), 2004..2308, MPI EP switches */
11883 case MPI2_MFGPAGE_DEVID_SSS6200:
11884 case MPI2_MFGPAGE_DEVID_SAS2004:
11885 case MPI2_MFGPAGE_DEVID_SAS2008:
11886 case MPI2_MFGPAGE_DEVID_SAS2108_1:
11887 case MPI2_MFGPAGE_DEVID_SAS2108_2:
11888 case MPI2_MFGPAGE_DEVID_SAS2108_3:
11889 case MPI2_MFGPAGE_DEVID_SAS2116_1:
11890 case MPI2_MFGPAGE_DEVID_SAS2116_2:
11891 case MPI2_MFGPAGE_DEVID_SAS2208_1:
11892 case MPI2_MFGPAGE_DEVID_SAS2208_2:
11893 case MPI2_MFGPAGE_DEVID_SAS2208_3:
11894 case MPI2_MFGPAGE_DEVID_SAS2208_4:
11895 case MPI2_MFGPAGE_DEVID_SAS2208_5:
11896 case MPI2_MFGPAGE_DEVID_SAS2208_6:
11897 case MPI2_MFGPAGE_DEVID_SAS2308_1:
11898 case MPI2_MFGPAGE_DEVID_SAS2308_2:
11899 case MPI2_MFGPAGE_DEVID_SAS2308_3:
11900 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11901 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11902 return MPI2_VERSION;
/* SAS 3.0 generation: 3004/3008 and the 3108 family */
11903 case MPI25_MFGPAGE_DEVID_SAS3004:
11904 case MPI25_MFGPAGE_DEVID_SAS3008:
11905 case MPI25_MFGPAGE_DEVID_SAS3108_1:
11906 case MPI25_MFGPAGE_DEVID_SAS3108_2:
11907 case MPI25_MFGPAGE_DEVID_SAS3108_5:
11908 case MPI25_MFGPAGE_DEVID_SAS3108_6:
11909 return MPI25_VERSION;
/* MPI 2.6 generation: 32xx/33xx/34xx/35xx/36xx, Aero/Sea, Atlas switch */
11910 case MPI26_MFGPAGE_DEVID_SAS3216:
11911 case MPI26_MFGPAGE_DEVID_SAS3224:
11912 case MPI26_MFGPAGE_DEVID_SAS3316_1:
11913 case MPI26_MFGPAGE_DEVID_SAS3316_2:
11914 case MPI26_MFGPAGE_DEVID_SAS3316_3:
11915 case MPI26_MFGPAGE_DEVID_SAS3316_4:
11916 case MPI26_MFGPAGE_DEVID_SAS3324_1:
11917 case MPI26_MFGPAGE_DEVID_SAS3324_2:
11918 case MPI26_MFGPAGE_DEVID_SAS3324_3:
11919 case MPI26_MFGPAGE_DEVID_SAS3324_4:
11920 case MPI26_MFGPAGE_DEVID_SAS3508:
11921 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11922 case MPI26_MFGPAGE_DEVID_SAS3408:
11923 case MPI26_MFGPAGE_DEVID_SAS3516:
11924 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11925 case MPI26_MFGPAGE_DEVID_SAS3416:
11926 case MPI26_MFGPAGE_DEVID_SAS3616:
11927 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11928 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11929 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11930 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11931 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11932 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11933 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11934 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11935 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11936 return MPI26_VERSION;
11942 * _scsih_probe - attach and add scsi host
11943 * @pdev: PCI device struct
11944 * @id: pci device id
11946 * Return: 0 success, anything else error.
11949 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
11951 struct MPT3SAS_ADAPTER *ioc;
11952 struct Scsi_Host *shost = NULL;
11954 u16 hba_mpi_version;
11956 /* Determine in which MPI version class this pci device belongs */
11957 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
11958 if (hba_mpi_version == 0)
11961 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
11962 * for other generation HBA's return with -ENODEV
11964 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
11967 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
11968 * for other generation HBA's return with -ENODEV
11970 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
11971 || hba_mpi_version == MPI26_VERSION)))
/* Allocate the Scsi_Host from the template matching the HBA generation. */
11974 switch (hba_mpi_version) {
11976 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
11977 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
11978 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
11979 shost = scsi_host_alloc(&mpt2sas_driver_template,
11980 sizeof(struct MPT3SAS_ADAPTER));
11983 ioc = shost_priv(shost);
11984 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11985 ioc->hba_mpi_version_belonged = hba_mpi_version;
11986 ioc->id = mpt2_ids++;
11987 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
11988 switch (pdev->device) {
11989 case MPI2_MFGPAGE_DEVID_SSS6200:
11990 ioc->is_warpdrive = 1;
11991 ioc->hide_ir_msg = 1;
11993 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11994 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11995 ioc->is_mcpu_endpoint = 1;
11998 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
/* multipath is not enabled by default on SAS 2.0 parts */
12002 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12003 ioc->multipath_on_hba = 0;
12005 ioc->multipath_on_hba = 1;
12008 case MPI25_VERSION:
12009 case MPI26_VERSION:
12010 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
12011 shost = scsi_host_alloc(&mpt3sas_driver_template,
12012 sizeof(struct MPT3SAS_ADAPTER));
12015 ioc = shost_priv(shost);
12016 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12017 ioc->hba_mpi_version_belonged = hba_mpi_version;
12018 ioc->id = mpt3_ids++;
12019 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12020 switch (pdev->device) {
12021 case MPI26_MFGPAGE_DEVID_SAS3508:
12022 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12023 case MPI26_MFGPAGE_DEVID_SAS3408:
12024 case MPI26_MFGPAGE_DEVID_SAS3516:
12025 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12026 case MPI26_MFGPAGE_DEVID_SAS3416:
12027 case MPI26_MFGPAGE_DEVID_SAS3616:
12028 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12029 ioc->is_gen35_ioc = 1;
/* Security-state device IDs: log the state but keep probing. */
12031 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12032 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12033 dev_err(&pdev->dev,
12034 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12035 pdev->device, pdev->subsystem_vendor,
12036 pdev->subsystem_device);
12038 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12039 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12040 dev_err(&pdev->dev,
12041 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12042 pdev->device, pdev->subsystem_vendor,
12043 pdev->subsystem_device);
12045 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12046 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12047 dev_info(&pdev->dev,
12048 "HBA is in Configurable Secure mode\n");
12050 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12051 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12052 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12055 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
/* Combined reply queues: SAS 3.0 C0+ silicon and all MPI 2.6 parts. */
12057 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12058 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12059 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12060 ioc->combined_reply_queue = 1;
12061 if (ioc->is_gen35_ioc)
12062 ioc->combined_reply_index_count =
12063 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12065 ioc->combined_reply_index_count =
12066 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
/* multipath default differs: on for gen3.5 IOCs, off otherwise */
12069 switch (ioc->is_gen35_ioc) {
12071 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12072 ioc->multipath_on_hba = 0;
12074 ioc->multipath_on_hba = 1;
12077 if (multipath_on_hba == -1 || multipath_on_hba > 0)
12078 ioc->multipath_on_hba = 1;
12080 ioc->multipath_on_hba = 0;
/* Register the adapter on the global IOC list. */
12091 INIT_LIST_HEAD(&ioc->list);
12092 spin_lock(&gioc_lock);
12093 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12094 spin_unlock(&gioc_lock);
12095 ioc->shost = shost;
/* Callback indexes registered earlier during module init. */
12097 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12098 ioc->tm_cb_idx = tm_cb_idx;
12099 ioc->ctl_cb_idx = ctl_cb_idx;
12100 ioc->base_cb_idx = base_cb_idx;
12101 ioc->port_enable_cb_idx = port_enable_cb_idx;
12102 ioc->transport_cb_idx = transport_cb_idx;
12103 ioc->scsih_cb_idx = scsih_cb_idx;
12104 ioc->config_cb_idx = config_cb_idx;
12105 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12106 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12107 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12108 ioc->logging_level = logging_level;
12109 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12110 /* Host waits for minimum of six seconds */
12111 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12113 * Enable MEMORY MOVE support flag.
12115 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12116 /* Enable ADDITIONAL QUERY support flag. */
12117 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12119 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12121 /* misc semaphores and spin locks */
12122 mutex_init(&ioc->reset_in_progress_mutex);
12123 /* initializing pci_access_mutex lock */
12124 mutex_init(&ioc->pci_access_mutex);
12125 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12126 spin_lock_init(&ioc->scsi_lookup_lock);
12127 spin_lock_init(&ioc->sas_device_lock);
12128 spin_lock_init(&ioc->sas_node_lock);
12129 spin_lock_init(&ioc->fw_event_lock);
12130 spin_lock_init(&ioc->raid_device_lock);
12131 spin_lock_init(&ioc->pcie_device_lock);
12132 spin_lock_init(&ioc->diag_trigger_lock);
12134 INIT_LIST_HEAD(&ioc->sas_device_list);
12135 INIT_LIST_HEAD(&ioc->sas_device_init_list);
12136 INIT_LIST_HEAD(&ioc->sas_expander_list);
12137 INIT_LIST_HEAD(&ioc->enclosure_list);
12138 INIT_LIST_HEAD(&ioc->pcie_device_list);
12139 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12140 INIT_LIST_HEAD(&ioc->fw_event_list);
12141 INIT_LIST_HEAD(&ioc->raid_device_list);
12142 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12143 INIT_LIST_HEAD(&ioc->delayed_tr_list);
12144 INIT_LIST_HEAD(&ioc->delayed_sc_list);
12145 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12146 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12147 INIT_LIST_HEAD(&ioc->reply_queue_list);
12148 INIT_LIST_HEAD(&ioc->port_table_list);
12150 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12152 /* init shost parameters */
12153 shost->max_cmd_len = 32;
12154 shost->max_lun = max_lun;
12155 shost->transportt = mpt3sas_transport_template;
12156 shost->unique_id = ioc->id;
12158 if (ioc->is_mcpu_endpoint) {
12159 /* mCPU MPI support 64K max IO */
12160 shost->max_sectors = 128;
12161 ioc_info(ioc, "The max_sectors value is set to %d\n",
12162 shost->max_sectors);
/* Clamp the max_sectors modparam into [64, 32767] and round to even. */
12164 if (max_sectors != 0xFFFF) {
12165 if (max_sectors < 64) {
12166 shost->max_sectors = 64;
12167 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12169 } else if (max_sectors > 32767) {
12170 shost->max_sectors = 32767;
12171 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12174 shost->max_sectors = max_sectors & 0xFFFE;
12175 ioc_info(ioc, "The max_sectors value is set to %d\n",
12176 shost->max_sectors);
12180 /* register EEDP capabilities with SCSI layer */
12181 if (prot_mask >= 0)
12182 scsi_host_set_prot(shost, (prot_mask & 0x07));
12184 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12185 | SHOST_DIF_TYPE2_PROTECTION
12186 | SHOST_DIF_TYPE3_PROTECTION);
12188 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* Per-adapter ordered workqueue processing firmware events. */
12191 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12192 "fw_event_%s%d", ioc->driver_name, ioc->id);
12193 ioc->firmware_event_thread = alloc_ordered_workqueue(
12194 ioc->firmware_event_name, 0);
12195 if (!ioc->firmware_event_thread) {
12196 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12197 __FILE__, __LINE__, __func__);
12199 goto out_thread_fail;
12202 ioc->is_driver_loading = 1;
12203 if ((mpt3sas_base_attach(ioc))) {
12204 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12205 __FILE__, __LINE__, __func__);
12207 goto out_attach_fail;
/* WarpDrive: decide drive visibility from mfg page 10 / volume count. */
12210 if (ioc->is_warpdrive) {
12211 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12212 ioc->hide_drives = 0;
12213 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12214 ioc->hide_drives = 1;
12216 if (mpt3sas_get_num_volumes(ioc))
12217 ioc->hide_drives = 1;
12219 ioc->hide_drives = 0;
12222 ioc->hide_drives = 0;
12224 shost->host_tagset = 0;
12225 shost->nr_hw_queues = 1;
/* Enable host-wide tags/multiqueue on gen3.5 IOCs when conditions allow. */
12227 if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
12228 host_tagset_enable && ioc->smp_affinity_enable) {
12230 shost->host_tagset = 1;
12231 shost->nr_hw_queues =
12232 ioc->reply_queue_count - ioc->high_iops_queues;
12234 dev_info(&ioc->pdev->dev,
12235 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12236 shost->can_queue, shost->nr_hw_queues);
12239 rv = scsi_add_host(shost, &pdev->dev);
12241 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12242 __FILE__, __LINE__, __func__);
12243 goto out_add_shost_fail;
12246 scsi_scan_host(shost);
12247 mpt3sas_setup_debugfs(ioc);
/* Error unwind: undo in reverse order of acquisition. */
12249 out_add_shost_fail:
12250 mpt3sas_base_detach(ioc);
12252 destroy_workqueue(ioc->firmware_event_thread);
12254 spin_lock(&gioc_lock);
12255 list_del(&ioc->list);
12256 spin_unlock(&gioc_lock);
12257 scsi_host_put(shost);
12262 * scsih_suspend - power management suspend main entry point
12263 * @dev: Device struct
12265 * Return: 0 success, anything else error.
12267 static int __maybe_unused
12268 scsih_suspend(struct device *dev)
12270 struct pci_dev *pdev = to_pci_dev(dev);
12271 struct Scsi_Host *shost;
12272 struct MPT3SAS_ADAPTER *ioc;
12275 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
/* Quiesce: stop watchdog, flush work, block new I/O, park NVMe devices. */
12279 mpt3sas_base_stop_watchdog(ioc);
12280 flush_scheduled_work();
12281 scsi_block_requests(shost);
12282 _scsih_nvme_shutdown(ioc);
12283 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12284 pdev, pci_name(pdev));
12286 mpt3sas_base_free_resources(ioc);
12291 * scsih_resume - power management resume main entry point
12292 * @dev: Device struct
12294 * Return: 0 success, anything else error.
12296 static int __maybe_unused
12297 scsih_resume(struct device *dev)
12299 struct pci_dev *pdev = to_pci_dev(dev);
12300 struct Scsi_Host *shost;
12301 struct MPT3SAS_ADAPTER *ioc;
12302 pci_power_t device_state = pdev->current_state;
12305 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12309 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12310 pdev, pci_name(pdev), device_state);
/* Remap BARs, reset the IOC, then unblock I/O and restart the watchdog. */
12313 r = mpt3sas_base_map_resources(ioc)
12316 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12317 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12318 scsi_unblock_requests(shost);
12319 mpt3sas_base_start_watchdog(ioc);
12324 * scsih_pci_error_detected - Called when a PCI error is detected.
12325 * @pdev: PCI device struct
12326 * @state: PCI channel state
12328 * Description: Called when a PCI error is detected.
12330 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12332 static pci_ers_result_t
12333 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12335 struct Scsi_Host *shost;
12336 struct MPT3SAS_ADAPTER *ioc;
12338 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12339 return PCI_ERS_RESULT_DISCONNECT;
12341 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12344 case pci_channel_io_normal:
12345 return PCI_ERS_RESULT_CAN_RECOVER;
12346 case pci_channel_io_frozen:
12347 /* Fatal error, prepare for slot reset */
12348 ioc->pci_error_recovery = 1;
12349 scsi_block_requests(ioc->shost);
12350 mpt3sas_base_stop_watchdog(ioc);
12351 mpt3sas_base_free_resources(ioc);
12352 return PCI_ERS_RESULT_NEED_RESET;
12353 case pci_channel_io_perm_failure:
12354 /* Permanent error, prepare for device removal */
12355 ioc->pci_error_recovery = 1;
12356 mpt3sas_base_stop_watchdog(ioc);
/* fail all outstanding commands back to the midlayer */
12357 _scsih_flush_running_cmds(ioc);
12358 return PCI_ERS_RESULT_DISCONNECT;
12360 return PCI_ERS_RESULT_NEED_RESET;
12364 * scsih_pci_slot_reset - Called when PCI slot has been reset.
12365 * @pdev: PCI device struct
12367 * Description: This routine is called by the pci error recovery
12368 * code after the PCI slot has been reset, just before we
12369 * should resume normal operations.
12371 static pci_ers_result_t
12372 scsih_pci_slot_reset(struct pci_dev *pdev)
12374 struct Scsi_Host *shost;
12375 struct MPT3SAS_ADAPTER *ioc;
12378 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12379 return PCI_ERS_RESULT_DISCONNECT;
12381 ioc_info(ioc, "PCI error: slot reset callback!!\n");
/* Leave recovery mode, restore config space and remap the BARs. */
12383 ioc->pci_error_recovery = 0;
12385 pci_restore_state(pdev);
12386 rc = mpt3sas_base_map_resources(ioc);
12388 return PCI_ERS_RESULT_DISCONNECT;
12390 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12391 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12393 ioc_warn(ioc, "hard reset: %s\n",
12394 (rc == 0) ? "success" : "failed");
12397 return PCI_ERS_RESULT_RECOVERED;
12399 return PCI_ERS_RESULT_DISCONNECT;
12403 * scsih_pci_resume() - resume normal ops after PCI reset
12404 * @pdev: pointer to PCI device
12406 * Called when the error recovery driver tells us that its
12407 * OK to resume normal operation. Use completion to allow
12408 * halted scsi ops to resume.
12411 scsih_pci_resume(struct pci_dev *pdev)
12413 struct Scsi_Host *shost;
12414 struct MPT3SAS_ADAPTER *ioc;
12416 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12419 ioc_info(ioc, "PCI error: resume callback!!\n");
/* Recovery finished: restart the watchdog and release blocked I/O. */
12421 mpt3sas_base_start_watchdog(ioc);
12422 scsi_unblock_requests(ioc->shost);
12426 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12427 * @pdev: pointer to PCI device
12429 static pci_ers_result_t
12430 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12432 struct Scsi_Host *shost;
12433 struct MPT3SAS_ADAPTER *ioc;
12435 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12436 return PCI_ERS_RESULT_DISCONNECT;
12438 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12440 /* TODO - dump whatever for debugging purposes */
12442 /* This called only if scsih_pci_error_detected returns
12443 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12444 * works, no need to reset slot.
12446 return PCI_ERS_RESULT_RECOVERED;
12450 * scsih_ncq_prio_supp - Check for NCQ command priority support
12451 * @sdev: scsi device struct
12453 * This is called when a user indicates they would like to enable
12454 * ncq command priorities. This works only on SATA devices.
12456 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12458 unsigned char *buf;
12459 bool ncq_prio_supp = false;
12461 if (!scsi_device_supports_vpd(sdev))
12462 return ncq_prio_supp;
12464 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12466 return ncq_prio_supp;
/*
 * VPD page 0x89 (ATA Information); byte 213 bit 4 mirrors the IDENTIFY
 * DEVICE NCQ priority capability bit of the underlying SATA device.
 */
12468 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12469 ncq_prio_supp = (buf[213] >> 4) & 1;
12472 return ncq_prio_supp;
12475 * The pci device ids are defined in mpi/mpi2_cnfg.h.
12477 static const struct pci_device_id mpt3sas_pci_table[] = {
12478 /* Spitfire ~ 2004 */
12479 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12480 PCI_ANY_ID, PCI_ANY_ID },
12481 /* Falcon ~ 2008 */
12482 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12483 PCI_ANY_ID, PCI_ANY_ID },
12484 /* Liberator ~ 2108 */
12485 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12486 PCI_ANY_ID, PCI_ANY_ID },
12487 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12488 PCI_ANY_ID, PCI_ANY_ID },
12489 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12490 PCI_ANY_ID, PCI_ANY_ID },
12491 /* Meteor ~ 2116 */
12492 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12493 PCI_ANY_ID, PCI_ANY_ID },
12494 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12495 PCI_ANY_ID, PCI_ANY_ID },
12496 /* Thunderbolt ~ 2208 */
12497 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12498 PCI_ANY_ID, PCI_ANY_ID },
12499 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12500 PCI_ANY_ID, PCI_ANY_ID },
12501 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12502 PCI_ANY_ID, PCI_ANY_ID },
12503 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12504 PCI_ANY_ID, PCI_ANY_ID },
12505 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12506 PCI_ANY_ID, PCI_ANY_ID },
12507 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12508 PCI_ANY_ID, PCI_ANY_ID },
12509 /* Mustang ~ 2308 */
12510 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12511 PCI_ANY_ID, PCI_ANY_ID },
12512 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12513 PCI_ANY_ID, PCI_ANY_ID },
12514 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12515 PCI_ANY_ID, PCI_ANY_ID },
12516 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12517 PCI_ANY_ID, PCI_ANY_ID },
12518 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12519 PCI_ANY_ID, PCI_ANY_ID },
12521 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12522 PCI_ANY_ID, PCI_ANY_ID },
12523 /* Fury ~ 3004 and 3008 */
12524 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12525 PCI_ANY_ID, PCI_ANY_ID },
12526 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12527 PCI_ANY_ID, PCI_ANY_ID },
12528 /* Invader ~ 3108 */
12529 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12530 PCI_ANY_ID, PCI_ANY_ID },
12531 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12532 PCI_ANY_ID, PCI_ANY_ID },
12533 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12534 PCI_ANY_ID, PCI_ANY_ID },
12535 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12536 PCI_ANY_ID, PCI_ANY_ID },
12537 /* Cutlass ~ 3216 and 3224 */
12538 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12539 PCI_ANY_ID, PCI_ANY_ID },
12540 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12541 PCI_ANY_ID, PCI_ANY_ID },
12542 /* Intruder ~ 3316 and 3324 */
12543 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12544 PCI_ANY_ID, PCI_ANY_ID },
12545 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12546 PCI_ANY_ID, PCI_ANY_ID },
12547 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12548 PCI_ANY_ID, PCI_ANY_ID },
12549 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12550 PCI_ANY_ID, PCI_ANY_ID },
12551 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12552 PCI_ANY_ID, PCI_ANY_ID },
12553 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12554 PCI_ANY_ID, PCI_ANY_ID },
12555 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12556 PCI_ANY_ID, PCI_ANY_ID },
12557 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12558 PCI_ANY_ID, PCI_ANY_ID },
12559 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12560 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12561 PCI_ANY_ID, PCI_ANY_ID },
12562 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12563 PCI_ANY_ID, PCI_ANY_ID },
12564 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12565 PCI_ANY_ID, PCI_ANY_ID },
12566 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12567 PCI_ANY_ID, PCI_ANY_ID },
12568 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12569 PCI_ANY_ID, PCI_ANY_ID },
12570 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12571 PCI_ANY_ID, PCI_ANY_ID },
12572 /* Mercator ~ 3616 */
12573 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12574 PCI_ANY_ID, PCI_ANY_ID },
12576 /* Aero SI 0x00E1 Configurable Secure
12577 * 0x00E2 Hard Secure
12579 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12580 PCI_ANY_ID, PCI_ANY_ID },
12581 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12582 PCI_ANY_ID, PCI_ANY_ID },
12585  * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
12587 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12588 PCI_ANY_ID, PCI_ANY_ID },
12589 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12590 PCI_ANY_ID, PCI_ANY_ID },
12592 /* Atlas PCIe Switch Management Port */
12593 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12594 PCI_ANY_ID, PCI_ANY_ID },
12596 /* Sea SI 0x00E5 Configurable Secure
12597 * 0x00E6 Hard Secure
12599 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12600 PCI_ANY_ID, PCI_ANY_ID },
12601 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12602 PCI_ANY_ID, PCI_ANY_ID },
12605  * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
12607 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12608 PCI_ANY_ID, PCI_ANY_ID },
12609 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12610 PCI_ANY_ID, PCI_ANY_ID },
12612 {0} /* Terminating entry */
12614 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12616 static struct pci_error_handlers _mpt3sas_err_handler = {
12617 .error_detected = scsih_pci_error_detected,
12618 .mmio_enabled = scsih_pci_mmio_enabled,
12619 .slot_reset = scsih_pci_slot_reset,
12620 .resume = scsih_pci_resume,
12623 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12625 static struct pci_driver mpt3sas_driver = {
12626 .name = MPT3SAS_DRIVER_NAME,
12627 .id_table = mpt3sas_pci_table,
12628 .probe = _scsih_probe,
12629 .remove = scsih_remove,
12630 .shutdown = scsih_shutdown,
12631 .err_handler = &_mpt3sas_err_handler,
12632 .driver.pm = &scsih_pm_ops,
12636 * scsih_init - main entry point for this driver.
12638 * Return: 0 success, anything else error.
12646 mpt3sas_base_initialize_callback_handler();
12648 	/* queuecommand callback handler */
12649 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12651 /* task management callback handler */
12652 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12654 /* base internal commands callback handler */
12655 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12656 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12657 mpt3sas_port_enable_done);
12659 /* transport internal commands callback handler */
12660 transport_cb_idx = mpt3sas_base_register_callback_handler(
12661 mpt3sas_transport_done);
12663 /* scsih internal commands callback handler */
12664 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12666 /* configuration page API internal commands callback handler */
12667 config_cb_idx = mpt3sas_base_register_callback_handler(
12668 mpt3sas_config_done);
12670 /* ctl module callback handler */
12671 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12673 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12674 _scsih_tm_tr_complete);
12676 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12677 _scsih_tm_volume_tr_complete);
12679 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12680 _scsih_sas_control_complete);
12682 mpt3sas_init_debugfs();
12687 * scsih_exit - exit point for this driver (when it is a module).
12689 * Return: 0 success, anything else error.
12695 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12696 mpt3sas_base_release_callback_handler(tm_cb_idx);
12697 mpt3sas_base_release_callback_handler(base_cb_idx);
12698 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12699 mpt3sas_base_release_callback_handler(transport_cb_idx);
12700 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12701 mpt3sas_base_release_callback_handler(config_cb_idx);
12702 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12704 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12705 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12706 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12708 /* raid transport support */
12709 if (hbas_to_enumerate != 1)
12710 raid_class_release(mpt3sas_raid_template);
12711 if (hbas_to_enumerate != 2)
12712 raid_class_release(mpt2sas_raid_template);
12713 sas_release_transport(mpt3sas_transport_template);
12714 mpt3sas_exit_debugfs();
12718 * _mpt3sas_init - main entry point for this driver.
12720 * Return: 0 success, anything else error.
12723 _mpt3sas_init(void)
12727 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12728 MPT3SAS_DRIVER_VERSION);
12730 mpt3sas_transport_template =
12731 sas_attach_transport(&mpt3sas_transport_functions);
12732 if (!mpt3sas_transport_template)
12735 	/* No need to attach the mpt3sas raid functions template
12736 	 * if the hbas_to_enumerate value is one.
12738 if (hbas_to_enumerate != 1) {
12739 mpt3sas_raid_template =
12740 raid_class_attach(&mpt3sas_raid_functions);
12741 if (!mpt3sas_raid_template) {
12742 sas_release_transport(mpt3sas_transport_template);
12747 /* No need to attach mpt2sas raid functions template
12748 	 * if the hbas_to_enumerate value is two
12750 if (hbas_to_enumerate != 2) {
12751 mpt2sas_raid_template =
12752 raid_class_attach(&mpt2sas_raid_functions);
12753 if (!mpt2sas_raid_template) {
12754 sas_release_transport(mpt3sas_transport_template);
12759 error = scsih_init();
12765 mpt3sas_ctl_init(hbas_to_enumerate);
12767 error = pci_register_driver(&mpt3sas_driver);
12775 * _mpt3sas_exit - exit point for this driver (when it is a module).
12779 _mpt3sas_exit(void)
12781 pr_info("mpt3sas version %s unloading\n",
12782 MPT3SAS_DRIVER_VERSION);
12784 mpt3sas_ctl_exit(hbas_to_enumerate);
12786 pci_unregister_driver(&mpt3sas_driver);
12791 module_init(_mpt3sas_init);
12792 module_exit(_mpt3sas_exit);