2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <linux/blk-mq-pci.h>
58 #include <asm/unaligned.h>
60 #include "mpt3sas_base.h"
62 #define RAID_CHANNEL 1
64 #define PCIE_CHANNEL 2
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
/* also claim the legacy mpt2sas module name so older userspace keeps working */
MODULE_ALIAS("mpt2sas");
/* local parameters */
/* Callback handles returned by mpt3sas_base_register_callback_handler();
 * u8 = -1 wraps to 0xFF, which serves as the "unregistered" sentinel.
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 " bits for enabling additional logging info (default=0)");
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0444);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0444);
122 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
124 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0444);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0444);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 1 - enumerates only SAS 2.0 generation HBAs\n \
135 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
137 /* diag_buffer_enable is bitwise
139 * bit 1 set = SNAPSHOT
140 * bit 2 set = EXTENDED
142 * Either bit can be set, or both
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0444);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0444);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0444);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
158 static bool enable_sdev_max_qd;
159 module_param(enable_sdev_max_qd, bool, 0444);
160 MODULE_PARM_DESC(enable_sdev_max_qd,
161 "Enable sdev max qd as can_queue, def=disabled(0)");
163 static int multipath_on_hba = -1;
164 module_param(multipath_on_hba, int, 0);
165 MODULE_PARM_DESC(multipath_on_hba,
166 "Multipath support to add same target device\n\t\t"
167 "as many times as it is visible to HBA from various paths\n\t\t"
169 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
170 "\t SAS 3.5 HBA - This will be enabled)");
172 static int host_tagset_enable = 1;
173 module_param(host_tagset_enable, int, 0444);
174 MODULE_PARM_DESC(host_tagset_enable,
175 "Shared host tagset enable/disable Default: enable(1)");
/* raid transport support */
/* Allocated at module load (one template per HBA generation); used for
 * exposing RAID volume attributes via the raid_class transport.
 */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;
183 * struct sense_info - common structure for obtaining sense keys
185 * @asc: additional sense code
186 * @ascq: additional sense code qualifier
194 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
195 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
196 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
197 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
198 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
200 * struct fw_event_work - firmware event struct
201 * @list: link list framework
202 * @work: work object (ioc->fault_reset_work_q)
203 * @ioc: per adapter object
204 * @device_handle: device handle
205 * @VF_ID: virtual function id
206 * @VP_ID: virtual port id
207 * @ignore: flag meaning this event has been marked to ignore
208 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
209 * @refcount: kref for this event
210 * @event_data: reply event data payload follows
212 * This object stored on ioc->fw_event_list.
214 struct fw_event_work {
215 struct list_head list;
216 struct work_struct work;
218 struct MPT3SAS_ADAPTER *ioc;
224 struct kref refcount;
225 char event_data[] __aligned(4);
228 static void fw_event_work_free(struct kref *r)
230 kfree(container_of(r, struct fw_event_work, refcount));
233 static void fw_event_work_get(struct fw_event_work *fw_work)
235 kref_get(&fw_work->refcount);
238 static void fw_event_work_put(struct fw_event_work *fw_work)
240 kref_put(&fw_work->refcount, fw_event_work_free);
243 static struct fw_event_work *alloc_fw_event_work(int len)
245 struct fw_event_work *fw_event;
247 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
251 kref_init(&fw_event->refcount);
256 * struct _scsi_io_transfer - scsi io transfer
257 * @handle: sas device handle (assigned by firmware)
258 * @is_raid: flag set for hidden raid components
259 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
260 * @data_length: data transfer length
261 * @data_dma: dma pointer to data
264 * @cdb_length: cdb length
266 * @timeout: timeout for this command
267 * @VF_ID: virtual function id
268 * @VP_ID: virtual port id
269 * @valid_reply: flag set for reply message
270 * @sense_length: sense length
271 * @ioc_status: ioc status
272 * @scsi_state: scsi state
273 * @scsi_status: scsi staus
274 * @log_info: log information
275 * @transfer_length: data length transfer when there is a reply message
277 * Used for sending internal scsi commands to devices within this module.
278 * Refer to _scsi_send_scsi_io().
280 struct _scsi_io_transfer {
283 enum dma_data_direction dir;
286 u8 sense[SCSI_SENSE_BUFFERSIZE];
294 /* the following bits are only valid when 'valid_reply = 1' */
304 * _scsih_set_debug_level - global setting of ioc->logging_level.
308 * Note: The logging levels are defined in mpt3sas_debug.h.
311 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
313 int ret = param_set_int(val, kp);
314 struct MPT3SAS_ADAPTER *ioc;
319 pr_info("setting logging_level(0x%08x)\n", logging_level);
320 spin_lock(&gioc_lock);
321 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
322 ioc->logging_level = logging_level;
323 spin_unlock(&gioc_lock);
326 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
327 &logging_level, 0644);
330 * _scsih_srch_boot_sas_address - search based on sas_address
331 * @sas_address: sas address
332 * @boot_device: boot device object from bios page 2
334 * Return: 1 when there's a match, 0 means no match.
337 _scsih_srch_boot_sas_address(u64 sas_address,
338 Mpi2BootDeviceSasWwid_t *boot_device)
340 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
344 * _scsih_srch_boot_device_name - search based on device name
345 * @device_name: device name specified in INDENTIFY fram
346 * @boot_device: boot device object from bios page 2
348 * Return: 1 when there's a match, 0 means no match.
351 _scsih_srch_boot_device_name(u64 device_name,
352 Mpi2BootDeviceDeviceName_t *boot_device)
354 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
358 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
359 * @enclosure_logical_id: enclosure logical id
360 * @slot_number: slot number
361 * @boot_device: boot device object from bios page 2
363 * Return: 1 when there's a match, 0 means no match.
366 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
367 Mpi2BootDeviceEnclosureSlot_t *boot_device)
369 return (enclosure_logical_id == le64_to_cpu(boot_device->
370 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
371 SlotNumber)) ? 1 : 0;
375 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
376 * port number from port list
377 * @ioc: per adapter object
378 * @port_id: port number
379 * @bypass_dirty_port_flag: when set look the matching hba port entry even
380 * if hba port entry is marked as dirty.
382 * Search for hba port entry corresponding to provided port number,
383 * if available return port object otherwise return NULL.
386 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
387 u8 port_id, u8 bypass_dirty_port_flag)
389 struct hba_port *port, *port_next;
392 * When multipath_on_hba is disabled then
393 * search the hba_port entry using default
396 if (!ioc->multipath_on_hba)
397 port_id = MULTIPATH_DISABLED_PORT_ID;
399 list_for_each_entry_safe(port, port_next,
400 &ioc->port_table_list, list) {
401 if (port->port_id != port_id)
403 if (bypass_dirty_port_flag)
405 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
411 * Allocate hba_port object for default port id (i.e. 255)
412 * when multipath_on_hba is disabled for the HBA.
413 * And add this object to port_table_list.
415 if (!ioc->multipath_on_hba) {
416 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
420 port->port_id = port_id;
422 "hba_port entry: %p, port: %d is added to hba_port list\n",
423 port, port->port_id);
424 list_add_tail(&port->list,
425 &ioc->port_table_list);
432 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
433 * @ioc: per adapter object
434 * @port: hba_port object
437 * Return virtual_phy object corresponding to phy number.
440 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
441 struct hba_port *port, u32 phy)
443 struct virtual_phy *vphy, *vphy_next;
445 if (!port->vphys_mask)
448 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
449 if (vphy->phy_mask & (1 << phy))
456 * _scsih_is_boot_device - search for matching boot device.
457 * @sas_address: sas address
458 * @device_name: device name specified in INDENTIFY fram
459 * @enclosure_logical_id: enclosure logical id
461 * @form: specifies boot device form
462 * @boot_device: boot device object from bios page 2
464 * Return: 1 when there's a match, 0 means no match.
467 _scsih_is_boot_device(u64 sas_address, u64 device_name,
468 u64 enclosure_logical_id, u16 slot, u8 form,
469 Mpi2BiosPage2BootDevice_t *boot_device)
474 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
477 rc = _scsih_srch_boot_sas_address(
478 sas_address, &boot_device->SasWwid);
480 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
481 if (!enclosure_logical_id)
483 rc = _scsih_srch_boot_encl_slot(
484 enclosure_logical_id,
485 slot, &boot_device->EnclosureSlot);
487 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
490 rc = _scsih_srch_boot_device_name(
491 device_name, &boot_device->DeviceName);
493 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
501 * _scsih_get_sas_address - set the sas_address for given device handle
503 * @handle: device handle
504 * @sas_address: sas address
506 * Return: 0 success, non-zero when failure
509 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
512 Mpi2SasDevicePage0_t sas_device_pg0;
513 Mpi2ConfigReply_t mpi_reply;
518 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
519 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
520 ioc_err(ioc, "failure at %s:%d/%s()!\n",
521 __FILE__, __LINE__, __func__);
525 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
526 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
527 /* For HBA, vSES doesn't return HBA SAS address. Instead return
528 * vSES's sas address.
530 if ((handle <= ioc->sas_hba.num_phys) &&
531 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
532 MPI2_SAS_DEVICE_INFO_SEP)))
533 *sas_address = ioc->sas_hba.sas_address;
535 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
539 /* we hit this because the given parent handle doesn't exist */
540 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
543 /* else error case */
544 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
545 handle, ioc_status, __FILE__, __LINE__, __func__);
550 * _scsih_determine_boot_device - determine boot device.
551 * @ioc: per adapter object
552 * @device: sas_device or pcie_device object
553 * @channel: SAS or PCIe channel
555 * Determines whether this device should be first reported device to
556 * to scsi-ml or sas transport, this purpose is for persistent boot device.
557 * There are primary, alternate, and current entries in bios page 2. The order
558 * priority is primary, alternate, then current. This routine saves
559 * the corresponding device object.
560 * The saved data to be used later in _scsih_probe_boot_devices().
563 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
566 struct _sas_device *sas_device;
567 struct _pcie_device *pcie_device;
568 struct _raid_device *raid_device;
571 u64 enclosure_logical_id;
574 /* only process this function when driver loads */
575 if (!ioc->is_driver_loading)
578 /* no Bios, return immediately */
579 if (!ioc->bios_pg3.BiosVersion)
582 if (channel == RAID_CHANNEL) {
583 raid_device = device;
584 sas_address = raid_device->wwid;
586 enclosure_logical_id = 0;
588 } else if (channel == PCIE_CHANNEL) {
589 pcie_device = device;
590 sas_address = pcie_device->wwid;
592 enclosure_logical_id = 0;
596 sas_address = sas_device->sas_address;
597 device_name = sas_device->device_name;
598 enclosure_logical_id = sas_device->enclosure_logical_id;
599 slot = sas_device->slot;
602 if (!ioc->req_boot_device.device) {
603 if (_scsih_is_boot_device(sas_address, device_name,
604 enclosure_logical_id, slot,
605 (ioc->bios_pg2.ReqBootDeviceForm &
606 MPI2_BIOSPAGE2_FORM_MASK),
607 &ioc->bios_pg2.RequestedBootDevice)) {
609 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
610 __func__, (u64)sas_address));
611 ioc->req_boot_device.device = device;
612 ioc->req_boot_device.channel = channel;
616 if (!ioc->req_alt_boot_device.device) {
617 if (_scsih_is_boot_device(sas_address, device_name,
618 enclosure_logical_id, slot,
619 (ioc->bios_pg2.ReqAltBootDeviceForm &
620 MPI2_BIOSPAGE2_FORM_MASK),
621 &ioc->bios_pg2.RequestedAltBootDevice)) {
623 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
624 __func__, (u64)sas_address));
625 ioc->req_alt_boot_device.device = device;
626 ioc->req_alt_boot_device.channel = channel;
630 if (!ioc->current_boot_device.device) {
631 if (_scsih_is_boot_device(sas_address, device_name,
632 enclosure_logical_id, slot,
633 (ioc->bios_pg2.CurrentBootDeviceForm &
634 MPI2_BIOSPAGE2_FORM_MASK),
635 &ioc->bios_pg2.CurrentBootDevice)) {
637 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
638 __func__, (u64)sas_address));
639 ioc->current_boot_device.device = device;
640 ioc->current_boot_device.channel = channel;
645 static struct _sas_device *
646 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
647 struct MPT3SAS_TARGET *tgt_priv)
649 struct _sas_device *ret;
651 assert_spin_locked(&ioc->sas_device_lock);
653 ret = tgt_priv->sas_dev;
660 static struct _sas_device *
661 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
662 struct MPT3SAS_TARGET *tgt_priv)
664 struct _sas_device *ret;
667 spin_lock_irqsave(&ioc->sas_device_lock, flags);
668 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
669 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
674 static struct _pcie_device *
675 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
676 struct MPT3SAS_TARGET *tgt_priv)
678 struct _pcie_device *ret;
680 assert_spin_locked(&ioc->pcie_device_lock);
682 ret = tgt_priv->pcie_dev;
684 pcie_device_get(ret);
690 * mpt3sas_get_pdev_from_target - pcie device search
691 * @ioc: per adapter object
692 * @tgt_priv: starget private object
694 * Context: This function will acquire ioc->pcie_device_lock and will release
695 * before returning the pcie_device object.
697 * This searches for pcie_device from target, then return pcie_device object.
699 static struct _pcie_device *
700 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
701 struct MPT3SAS_TARGET *tgt_priv)
703 struct _pcie_device *ret;
706 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
707 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
708 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
715 * __mpt3sas_get_sdev_by_rphy - sas device search
716 * @ioc: per adapter object
717 * @rphy: sas_rphy pointer
719 * Context: This function will acquire ioc->sas_device_lock and will release
720 * before returning the sas_device object.
722 * This searches for sas_device from rphy object
723 * then return sas_device object.
726 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
727 struct sas_rphy *rphy)
729 struct _sas_device *sas_device;
731 assert_spin_locked(&ioc->sas_device_lock);
733 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
734 if (sas_device->rphy != rphy)
736 sas_device_get(sas_device);
741 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
742 if (sas_device->rphy != rphy)
744 sas_device_get(sas_device);
752 * mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
753 * sas address from sas_device_list list
754 * @ioc: per adapter object
757 * Search for _sas_device object corresponding to provided sas address,
758 * if available return _sas_device object address otherwise return NULL.
761 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
762 u64 sas_address, struct hba_port *port)
764 struct _sas_device *sas_device;
769 assert_spin_locked(&ioc->sas_device_lock);
771 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
772 if (sas_device->sas_address != sas_address)
774 if (sas_device->port != port)
776 sas_device_get(sas_device);
780 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
781 if (sas_device->sas_address != sas_address)
783 if (sas_device->port != port)
785 sas_device_get(sas_device);
793 * mpt3sas_get_sdev_by_addr - sas device search
794 * @ioc: per adapter object
795 * @sas_address: sas address
796 * @port: hba port entry
797 * Context: Calling function should acquire ioc->sas_device_lock
799 * This searches for sas_device based on sas_address & port number,
800 * then return sas_device object.
803 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
804 u64 sas_address, struct hba_port *port)
806 struct _sas_device *sas_device;
809 spin_lock_irqsave(&ioc->sas_device_lock, flags);
810 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
812 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
817 static struct _sas_device *
818 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
820 struct _sas_device *sas_device;
822 assert_spin_locked(&ioc->sas_device_lock);
824 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
825 if (sas_device->handle == handle)
828 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
829 if (sas_device->handle == handle)
835 sas_device_get(sas_device);
840 * mpt3sas_get_sdev_by_handle - sas device search
841 * @ioc: per adapter object
842 * @handle: sas device handle (assigned by firmware)
843 * Context: Calling function should acquire ioc->sas_device_lock
845 * This searches for sas_device based on sas_address, then return sas_device
849 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
851 struct _sas_device *sas_device;
854 spin_lock_irqsave(&ioc->sas_device_lock, flags);
855 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
856 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
862 * _scsih_display_enclosure_chassis_info - display device location info
863 * @ioc: per adapter object
864 * @sas_device: per sas device object
865 * @sdev: scsi device struct
866 * @starget: scsi target struct
869 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
870 struct _sas_device *sas_device, struct scsi_device *sdev,
871 struct scsi_target *starget)
874 if (sas_device->enclosure_handle != 0)
875 sdev_printk(KERN_INFO, sdev,
876 "enclosure logical id (0x%016llx), slot(%d) \n",
878 sas_device->enclosure_logical_id,
880 if (sas_device->connector_name[0] != '\0')
881 sdev_printk(KERN_INFO, sdev,
882 "enclosure level(0x%04x), connector name( %s)\n",
883 sas_device->enclosure_level,
884 sas_device->connector_name);
885 if (sas_device->is_chassis_slot_valid)
886 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
887 sas_device->chassis_slot);
888 } else if (starget) {
889 if (sas_device->enclosure_handle != 0)
890 starget_printk(KERN_INFO, starget,
891 "enclosure logical id(0x%016llx), slot(%d) \n",
893 sas_device->enclosure_logical_id,
895 if (sas_device->connector_name[0] != '\0')
896 starget_printk(KERN_INFO, starget,
897 "enclosure level(0x%04x), connector name( %s)\n",
898 sas_device->enclosure_level,
899 sas_device->connector_name);
900 if (sas_device->is_chassis_slot_valid)
901 starget_printk(KERN_INFO, starget,
902 "chassis slot(0x%04x)\n",
903 sas_device->chassis_slot);
905 if (sas_device->enclosure_handle != 0)
906 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
907 (u64)sas_device->enclosure_logical_id,
909 if (sas_device->connector_name[0] != '\0')
910 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
911 sas_device->enclosure_level,
912 sas_device->connector_name);
913 if (sas_device->is_chassis_slot_valid)
914 ioc_info(ioc, "chassis slot(0x%04x)\n",
915 sas_device->chassis_slot);
920 * _scsih_sas_device_remove - remove sas_device from list.
921 * @ioc: per adapter object
922 * @sas_device: the sas_device object
923 * Context: This function will acquire ioc->sas_device_lock.
925 * If sas_device is on the list, remove it and decrement its reference count.
928 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
929 struct _sas_device *sas_device)
935 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
936 sas_device->handle, (u64)sas_device->sas_address);
938 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
941 * The lock serializes access to the list, but we still need to verify
942 * that nobody removed the entry while we were waiting on the lock.
944 spin_lock_irqsave(&ioc->sas_device_lock, flags);
945 if (!list_empty(&sas_device->list)) {
946 list_del_init(&sas_device->list);
947 sas_device_put(sas_device);
949 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
953 * _scsih_device_remove_by_handle - removing device object by handle
954 * @ioc: per adapter object
955 * @handle: device handle
958 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
960 struct _sas_device *sas_device;
963 if (ioc->shost_recovery)
966 spin_lock_irqsave(&ioc->sas_device_lock, flags);
967 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
969 list_del_init(&sas_device->list);
970 sas_device_put(sas_device);
972 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
974 _scsih_remove_device(ioc, sas_device);
975 sas_device_put(sas_device);
980 * mpt3sas_device_remove_by_sas_address - removing device object by
981 * sas address & port number
982 * @ioc: per adapter object
983 * @sas_address: device sas_address
984 * @port: hba port entry
989 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
990 u64 sas_address, struct hba_port *port)
992 struct _sas_device *sas_device;
995 if (ioc->shost_recovery)
998 spin_lock_irqsave(&ioc->sas_device_lock, flags);
999 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1001 list_del_init(&sas_device->list);
1002 sas_device_put(sas_device);
1004 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1006 _scsih_remove_device(ioc, sas_device);
1007 sas_device_put(sas_device);
1012 * _scsih_sas_device_add - insert sas_device to the list.
1013 * @ioc: per adapter object
1014 * @sas_device: the sas_device object
1015 * Context: This function will acquire ioc->sas_device_lock.
1017 * Adding new object to the ioc->sas_device_list.
1020 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1021 struct _sas_device *sas_device)
1023 unsigned long flags;
1026 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1027 __func__, sas_device->handle,
1028 (u64)sas_device->sas_address));
1030 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1033 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1034 sas_device_get(sas_device);
1035 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1036 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1038 if (ioc->hide_drives) {
1039 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1043 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1044 sas_device->sas_address_parent, sas_device->port)) {
1045 _scsih_sas_device_remove(ioc, sas_device);
1046 } else if (!sas_device->starget) {
1048 * When asyn scanning is enabled, its not possible to remove
1049 * devices while scanning is turned on due to an oops in
1050 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1052 if (!ioc->is_driver_loading) {
1053 mpt3sas_transport_port_remove(ioc,
1054 sas_device->sas_address,
1055 sas_device->sas_address_parent,
1057 _scsih_sas_device_remove(ioc, sas_device);
1060 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1064 * _scsih_sas_device_init_add - insert sas_device to the list.
1065 * @ioc: per adapter object
1066 * @sas_device: the sas_device object
1067 * Context: This function will acquire ioc->sas_device_lock.
1069 * Adding new object at driver load time to the ioc->sas_device_init_list.
1072 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1073 struct _sas_device *sas_device)
1075 unsigned long flags;
1078 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1079 __func__, sas_device->handle,
1080 (u64)sas_device->sas_address));
1082 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1085 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1086 sas_device_get(sas_device);
1087 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1088 _scsih_determine_boot_device(ioc, sas_device, 0);
1089 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1093 static struct _pcie_device *
1094 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1096 struct _pcie_device *pcie_device;
1098 assert_spin_locked(&ioc->pcie_device_lock);
1100 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1101 if (pcie_device->wwid == wwid)
1104 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1105 if (pcie_device->wwid == wwid)
1111 pcie_device_get(pcie_device);
1117 * mpt3sas_get_pdev_by_wwid - pcie device search
1118 * @ioc: per adapter object
1121 * Context: This function will acquire ioc->pcie_device_lock and will release
1122 * before returning the pcie_device object.
1124 * This searches for pcie_device based on wwid, then return pcie_device object.
1126 static struct _pcie_device *
1127 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1129 struct _pcie_device *pcie_device;
1130 unsigned long flags;
1132 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1133 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1134 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1140 static struct _pcie_device *
1141 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1144 struct _pcie_device *pcie_device;
1146 assert_spin_locked(&ioc->pcie_device_lock);
1148 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1149 if (pcie_device->id == id && pcie_device->channel == channel)
1152 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1153 if (pcie_device->id == id && pcie_device->channel == channel)
1159 pcie_device_get(pcie_device);
1163 static struct _pcie_device *
1164 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1166 struct _pcie_device *pcie_device;
1168 assert_spin_locked(&ioc->pcie_device_lock);
1170 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1171 if (pcie_device->handle == handle)
1174 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1175 if (pcie_device->handle == handle)
1181 pcie_device_get(pcie_device);
1187 * mpt3sas_get_pdev_by_handle - pcie device search
1188 * @ioc: per adapter object
1189 * @handle: Firmware device handle
1191 * Context: This function will acquire ioc->pcie_device_lock and will release
1192 * before returning the pcie_device object.
1194 * This searches for pcie_device based on handle, then return pcie_device
/*
 * Locked wrapper around __mpt3sas_get_pdev_by_handle(); returned object
 * carries a reference taken inside the lock.
 * NOTE(review): excerpt is elided — the return statement is not visible.
 */
1197 struct _pcie_device *
1198 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1200 struct _pcie_device *pcie_device;
1201 unsigned long flags;
1203 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1204 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1205 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1211 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1212 * @ioc: per adapter object
1213 * Context: This function will acquire ioc->pcie_device_lock
1215 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1216 * which has reported maximum among all available NVMe drives.
1217 * Minimum max_shutdown_latency will be six seconds.
/*
 * Starts from IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT as the floor, then walks
 * pcie_device_list under the lock taking the max per-device latency.
 * NOTE(review): excerpt is elided — the assignment on the line following
 * the inner "if" is split across missing lines; code left untouched.
 */
1220 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1222 struct _pcie_device *pcie_device;
1223 unsigned long flags;
1224 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1226 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1227 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1228 if (pcie_device->shutdown_latency) {
1229 if (shutdown_latency < pcie_device->shutdown_latency)
1231 pcie_device->shutdown_latency;
1234 ioc->max_shutdown_latency = shutdown_latency;
1235 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1239 * _scsih_pcie_device_remove - remove pcie_device from list.
1240 * @ioc: per adapter object
1241 * @pcie_device: the pcie_device object
1242 * Context: This function will acquire ioc->pcie_device_lock.
1244 * If pcie_device is on the list, remove it and decrement its reference count.
/*
 * NOTE(review): excerpt is elided — braces, the "update_latency = 1" style
 * assignment after the shutdown_latency check, and the trailing
 * "if (update_latency)" guard are not fully visible; code left untouched.
 */
1247 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1248 struct _pcie_device *pcie_device)
1250 unsigned long flags;
1251 int was_on_pcie_device_list = 0;
1252 u8 update_latency = 0;
/* Informational logging of the device being torn down. */
1256 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1257 pcie_device->handle, (u64)pcie_device->wwid);
1258 if (pcie_device->enclosure_handle != 0)
1259 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1260 (u64)pcie_device->enclosure_logical_id,
1262 if (pcie_device->connector_name[0] != '\0')
1263 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1264 pcie_device->enclosure_level,
1265 pcie_device->connector_name);
/* Unlink from the list under the lock; remember whether it was linked. */
1267 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1268 if (!list_empty(&pcie_device->list)) {
1269 list_del_init(&pcie_device->list);
1270 was_on_pcie_device_list = 1;
1272 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1274 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* Drop the list's reference (and free serial_number) outside the lock. */
1275 if (was_on_pcie_device_list) {
1276 kfree(pcie_device->serial_number);
1277 pcie_device_put(pcie_device);
1281 * This device's RTD3 Entry Latency matches IOC's
1282 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1283 * from the available drives as current drive is getting removed.
1286 _scsih_set_nvme_max_shutdown_latency(ioc);
1291 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1292 * @ioc: per adapter object
1293 * @handle: device handle
/*
 * Looks the device up by firmware handle, unlinks it from the list under
 * the lock, then removes it from the SCSI midlayer and drops references.
 * Bails out early while host recovery is in progress.
 * NOTE(review): excerpt is elided — the "if (pcie_device)" guard around
 * the list manipulation and the final latency recalculation guard are not
 * fully visible; code left untouched.
 */
1296 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1298 struct _pcie_device *pcie_device;
1299 unsigned long flags;
1300 int was_on_pcie_device_list = 0;
1301 u8 update_latency = 0;
1303 if (ioc->shost_recovery)
1306 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
/* Lookup takes its own reference; the put below drops the list's ref. */
1307 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1309 if (!list_empty(&pcie_device->list)) {
1310 list_del_init(&pcie_device->list);
1311 was_on_pcie_device_list = 1;
1312 pcie_device_put(pcie_device);
1314 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1317 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1318 if (was_on_pcie_device_list) {
1319 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1320 pcie_device_put(pcie_device);
1324 * This device's RTD3 Entry Latency matches IOC's
1325 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1326 * from the available drives as current drive is getting removed.
1329 _scsih_set_nvme_max_shutdown_latency(ioc);
1333 * _scsih_pcie_device_add - add pcie_device object
1334 * @ioc: per adapter object
1335 * @pcie_device: pcie_device object
1337 * This is added to the pcie_device_list link list.
/*
 * Takes a reference and appends the device to ioc->pcie_device_list under
 * the lock, then registers it with the SCSI midlayer via scsi_add_device()
 * unless its access status is DEVICE_BLOCKED.
 * NOTE(review): excerpt is elided — the dewtprintk()-style wrappers around
 * the ioc_info() calls (note the trailing "))" ) and several returns are
 * not fully visible; code left untouched.
 */
1340 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1341 struct _pcie_device *pcie_device)
1343 unsigned long flags;
1346 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1348 pcie_device->handle, (u64)pcie_device->wwid));
1349 if (pcie_device->enclosure_handle != 0)
1351 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1353 (u64)pcie_device->enclosure_logical_id,
1354 pcie_device->slot));
1355 if (pcie_device->connector_name[0] != '\0')
1357 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1358 __func__, pcie_device->enclosure_level,
1359 pcie_device->connector_name));
/* List holds its own reference to the device. */
1361 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1362 pcie_device_get(pcie_device);
1363 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1364 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* Blocked devices are not exposed to the SCSI midlayer. */
1366 if (pcie_device->access_status ==
1367 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1368 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
/* On scsi_add_device() failure, back out the list addition. */
1371 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1372 _scsih_pcie_device_remove(ioc, pcie_device);
1373 } else if (!pcie_device->starget) {
1374 if (!ioc->is_driver_loading) {
1375 /*TODO-- Need to find out whether this condition will occur or not*/
1376 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1379 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1383 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1384 * @ioc: per adapter object
1385 * @pcie_device: the pcie_device object
1386 * Context: This function will acquire ioc->pcie_device_lock.
1388 * Adding new object at driver load time to the ioc->pcie_device_init_list.
/*
 * Driver-load-time variant of _scsih_pcie_device_add(): takes a reference,
 * appends to pcie_device_init_list, and (for non-blocked devices) feeds the
 * device into boot-device selection — all under the lock.
 * NOTE(review): excerpt is elided — the debug-print wrappers (trailing
 * "))" ) are not fully visible; code left untouched.
 */
1391 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1392 struct _pcie_device *pcie_device)
1394 unsigned long flags;
1397 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1399 pcie_device->handle, (u64)pcie_device->wwid));
1400 if (pcie_device->enclosure_handle != 0)
1402 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1404 (u64)pcie_device->enclosure_logical_id,
1405 pcie_device->slot));
1406 if (pcie_device->connector_name[0] != '\0')
1408 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1409 __func__, pcie_device->enclosure_level,
1410 pcie_device->connector_name));
/* Init list holds its own reference. */
1412 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1413 pcie_device_get(pcie_device);
1414 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1415 if (pcie_device->access_status !=
1416 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1417 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1418 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1421 * _scsih_raid_device_find_by_id - raid device search
1422 * @ioc: per adapter object
1423 * @id: sas device target id
1424 * @channel: sas device channel
1425 * Context: Calling function should acquire ioc->raid_device_lock
1427 * This searches for raid_device based on target id, then return raid_device
/*
 * Linear scan of ioc->raid_device_list; no reference counting here — the
 * caller's lock keeps the object alive for the duration of use.
 * NOTE(review): excerpt is elided — the "r = ..." assignment, goto/out
 * label and return are not visible; code left untouched.
 */
1430 static struct _raid_device *
1431 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1433 struct _raid_device *raid_device, *r;
1436 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1437 if (raid_device->id == id && raid_device->channel == channel) {
1448 * mpt3sas_raid_device_find_by_handle - raid device search
1449 * @ioc: per adapter object
1450 * @handle: sas device handle (assigned by firmware)
1451 * Context: Calling function should acquire ioc->raid_device_lock
1453 * This searches for raid_device based on handle, then return raid_device
/*
 * Same pattern as _scsih_raid_device_find_by_id() but keyed on the
 * firmware handle; skips non-matching entries with "continue".
 * NOTE(review): excerpt is elided — match/return lines are not visible.
 */
1456 struct _raid_device *
1457 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1459 struct _raid_device *raid_device, *r;
1462 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1463 if (raid_device->handle != handle)
1474 * _scsih_raid_device_find_by_wwid - raid device search
1475 * @ioc: per adapter object
1477 * Context: Calling function should acquire ioc->raid_device_lock
1479 * This searches for raid_device based on wwid, then return raid_device
/*
 * Unlocked list scan keyed on the volume WWID; caller holds the lock.
 * NOTE(review): excerpt is elided — match/return lines are not visible.
 */
1482 static struct _raid_device *
1483 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1485 struct _raid_device *raid_device, *r;
1488 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1489 if (raid_device->wwid != wwid)
1500 * _scsih_raid_device_add - add raid_device object
1501 * @ioc: per adapter object
1502 * @raid_device: raid_device object
1504 * This is added to the raid_device_list link list.
/*
 * Simple locked list append; raid devices are not kref-counted the way
 * sas/pcie devices are in this driver (no get/put here).
 * NOTE(review): excerpt is elided — debug-print wrapper (trailing "))" )
 * is not fully visible; code left untouched.
 */
1507 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1508 struct _raid_device *raid_device)
1510 unsigned long flags;
1513 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1515 raid_device->handle, (u64)raid_device->wwid));
1517 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1518 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1519 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1523 * _scsih_raid_device_remove - delete raid_device object
1524 * @ioc: per adapter object
1525 * @raid_device: raid_device object
/*
 * Unlinks the raid device under ioc->raid_device_lock.
 * NOTE(review): excerpt is elided — the line between list_del() and the
 * unlock (presumably the kfree of raid_device) is not visible;
 * code left untouched — TODO confirm against the full source.
 */
1529 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1530 struct _raid_device *raid_device)
1532 unsigned long flags;
1534 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1535 list_del(&raid_device->list);
1537 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1541 * mpt3sas_scsih_expander_find_by_handle - expander device search
1542 * @ioc: per adapter object
1543 * @handle: expander handle (assigned by firmware)
1544 * Context: Calling function should acquire ioc->sas_device_lock
1546 * This searches for expander device based on handle, then returns the
/*
 * Linear scan of ioc->sas_expander_list keyed on firmware handle.
 * NOTE(review): excerpt is elided — match/return lines are not visible.
 */
1550 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1552 struct _sas_node *sas_expander, *r;
1555 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1556 if (sas_expander->handle != handle)
1566 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1567 * @ioc: per adapter object
1568 * @handle: enclosure handle (assigned by firmware)
1569 * Context: Calling function should acquire ioc->sas_device_lock
1571 * This searches for enclosure device based on handle, then returns the
/*
 * Scans ioc->enclosure_list; the handle lives in the cached config page 0
 * in little-endian form, hence the le16_to_cpu() on each compare.
 * NOTE(review): excerpt is elided — match/return lines are not visible.
 */
1574 static struct _enclosure_node *
1575 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1577 struct _enclosure_node *enclosure_dev, *r;
1580 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1581 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1590 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1591 * @ioc: per adapter object
1592 * @sas_address: sas address
1593 * @port: hba port entry
1594 * Context: Calling function should acquire ioc->sas_node_lock.
1596 * This searches for expander device based on sas_address & port number,
1597 * then returns the sas_node object.
/*
 * Match requires BOTH the SAS address and the same hba_port pointer —
 * the same address can appear on different ports in multi-port topologies.
 * NOTE(review): excerpt is elided — the "r = sas_expander" assignment and
 * return are not visible; code left untouched.
 */
1600 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1601 u64 sas_address, struct hba_port *port)
1603 struct _sas_node *sas_expander, *r = NULL;
1608 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1609 if (sas_expander->sas_address != sas_address)
1611 if (sas_expander->port != port)
1621 * _scsih_expander_node_add - insert expander device to the list.
1622 * @ioc: per adapter object
1623 * @sas_expander: the sas_device object
1624 * Context: This function will acquire ioc->sas_node_lock.
1626 * Adding new object to the ioc->sas_expander_list.
/* Locked list append; no reference counting on expander nodes here. */
1629 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1630 struct _sas_node *sas_expander)
1632 unsigned long flags;
1634 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1635 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1636 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1640 * _scsih_is_end_device - determines if device is an end device
1641 * @device_info: bitfield providing information about the device.
1644 * Return: 1 if end device.
/*
 * True when the END_DEVICE bit is set AND at least one of the SSP/STP
 * target or SATA-device bits is set.  The single "|" between the mask
 * tests is a bitwise OR of 0/1 results — equivalent to logical OR here.
 * NOTE(review): excerpt is elided — the return statements are not visible.
 */
1647 _scsih_is_end_device(u32 device_info)
1649 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1650 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1651 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1652 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1659 * _scsih_is_nvme_pciescsi_device - determines if
1660 * device is an pcie nvme/scsi device
1661 * @device_info: bitfield providing information about the device.
1664 * Returns 1 if device is pcie device type nvme/scsi.
/*
 * Masks the PCIe device-type field and accepts either NVMe or SCSI type.
 * NOTE(review): excerpt is elided — the return statements are not visible.
 */
1667 _scsih_is_nvme_pciescsi_device(u32 device_info)
1669 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1670 == MPI26_PCIE_DEVINFO_NVME) ||
1671 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1672 == MPI26_PCIE_DEVINFO_SCSI))
1679 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1680 * @ioc: per adapter object
1683 * Context: This function will acquire ioc->scsi_lookup_lock.
1685 * This will search for a matching channel:id in the scsi_lookup array,
1686 * returning 1 if found.
/*
 * Walks every possible smid and checks whether any outstanding command
 * targets the given id/channel.  mpt3sas_scsih_scsi_lookup_get() takes a
 * command reference internally — presumably released elsewhere in the
 * elided lines (scsi_host_find_tag-based get/put); TODO confirm.
 * NOTE(review): excerpt is elided — loop start, null-check and returns
 * are not visible; code left untouched.
 */
1689 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1693 struct scsi_cmnd *scmd;
1696 smid <= ioc->shost->can_queue; smid++) {
1697 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1700 if (scmd->device->id == id &&
1701 scmd->device->channel == channel)
1708 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1709 * @ioc: per adapter object
1713 * Context: This function will acquire ioc->scsi_lookup_lock.
1715 * This will search for a matching channel:id:lun in the scsi_lookup array,
1716 * returning 1 if found.
/*
 * LUN-granular variant of _scsih_scsi_lookup_find_by_target(): additionally
 * compares sdev->lun on each outstanding command.
 * NOTE(review): excerpt is elided — null-check and returns are not visible.
 */
1719 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1720 unsigned int lun, int channel)
1723 struct scsi_cmnd *scmd;
1725 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1727 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1730 if (scmd->device->id == id &&
1731 scmd->device->channel == channel &&
1732 scmd->device->lun == lun)
1739 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1740 * @ioc: per adapter object
1741 * @smid: system request message index
1743 * Return: the smid stored scmd pointer.
1744 * Then will dereference the stored scmd pointer.
/*
 * Maps a firmware smid back to the block-layer tag (hw queue number folded
 * in via BLK_MQ_UNIQUE_TAG_BITS), then asks the SCSI host for that tag.
 * A zero DevHandle in the message frame means the smid is free at driver
 * level, so NULL is returned for it.  A tracker with cb_idx == 0xFF or
 * smid == 0 is likewise treated as not-in-use.
 * NOTE(review): excerpt is elided — the tag computation's first line, the
 * NULL-scmd guard and the returns are not visible; code left untouched.
 */
1747 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1749 struct scsi_cmnd *scmd = NULL;
1750 struct scsiio_tracker *st;
1751 Mpi25SCSIIORequest_t *mpi_request;
1755 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1757 ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1759 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1762 * If SCSI IO request is outstanding at driver level then
1763 * DevHandle filed must be non-zero. If DevHandle is zero
1764 * then it means that this smid is free at driver level,
1767 if (!mpi_request->DevHandle)
1770 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1772 st = scsi_cmd_priv(scmd);
1773 if (st->cb_idx == 0xFF || st->smid == 0)
1781 * scsih_change_queue_depth - setting device queue depth
1782 * @sdev: scsi device struct
1783 * @qdepth: requested queue depth
1785 * Return: queue depth.
/*
 * Clamps the requested depth: starts at shost->can_queue, caps SATA end
 * devices at MPT3SAS_SATA_QUEUE_DEPTH (volumes skip the per-device lookup
 * via the MPT_TARGET_FLAGS_VOLUME check), then applies tagged-queueing and
 * max-depth limits before calling scsi_change_queue_depth().
 * NOTE(review): excerpt is elided — several guards/assignments between the
 * numbered lines (e.g. the enable_sdev_max_qd branch body, the "limit to 1
 * if untagged" handling) are not visible; code left untouched.
 */
1788 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1790 struct Scsi_Host *shost = sdev->host;
1792 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1793 struct MPT3SAS_DEVICE *sas_device_priv_data;
1794 struct MPT3SAS_TARGET *sas_target_priv_data;
1795 struct _sas_device *sas_device;
1796 unsigned long flags;
1798 max_depth = shost->can_queue;
1801 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1804 if (ioc->enable_sdev_max_qd)
1807 sas_device_priv_data = sdev->hostdata;
1808 if (!sas_device_priv_data)
1810 sas_target_priv_data = sas_device_priv_data->sas_target;
1811 if (!sas_target_priv_data)
1813 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
/* Per-device SATA check under sas_device_lock; lookup takes a reference. */
1816 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1817 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1819 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1820 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1822 sas_device_put(sas_device);
1824 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1828 if (!sdev->tagged_supported)
1830 if (qdepth > max_depth)
1832 scsi_change_queue_depth(sdev, qdepth);
1833 sdev_printk(KERN_INFO, sdev,
1834 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1835 sdev->queue_depth, sdev->tagged_supported,
1836 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1837 return sdev->queue_depth;
1841 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1842 * @sdev: scsi device struct
1843 * @qdepth: requested queue depth
/*
 * Public wrapper: when enable_sdev_max_qd is set, forces the request up to
 * the host's full can_queue before delegating to scsih_change_queue_depth().
 * NOTE(review): excerpt is elided — the return is not visible.
 */
1848 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1850 struct Scsi_Host *shost = sdev->host;
1851 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1853 if (ioc->enable_sdev_max_qd)
1854 qdepth = shost->can_queue;
1856 scsih_change_queue_depth(sdev, qdepth);
1860 * scsih_target_alloc - target add routine
1861 * @starget: scsi target struct
1863 * Return: 0 if ok. Any other return is assumed to be an error and
1864 * the device is ignored.
/*
 * SCSI midlayer target-alloc hook.  Allocates per-target private data and
 * wires it to the matching driver object by channel:
 *   RAID_CHANNEL -> raid_device (by id), PCIE_CHANNEL -> pcie_device
 *   (by id/channel), otherwise sas_device (by the target's rphy).
 * The pcie/sas lookups take a reference that is stored in
 * sas_target_priv_data->{pcie_dev,sas_dev} and released in
 * scsih_target_destroy().
 * NOTE(review): excerpt is elided — allocation-failure return, the
 * "if (raid_device/pcie_device/sas_device)" guards and the final return
 * are not visible; code left untouched.
 */
1867 scsih_target_alloc(struct scsi_target *starget)
1869 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1870 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1871 struct MPT3SAS_TARGET *sas_target_priv_data;
1872 struct _sas_device *sas_device;
1873 struct _raid_device *raid_device;
1874 struct _pcie_device *pcie_device;
1875 unsigned long flags;
1876 struct sas_rphy *rphy;
1878 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1880 if (!sas_target_priv_data)
1883 starget->hostdata = sas_target_priv_data;
1884 sas_target_priv_data->starget = starget;
1885 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
/* RAID volumes. */
1888 if (starget->channel == RAID_CHANNEL) {
1889 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1890 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1893 sas_target_priv_data->handle = raid_device->handle;
1894 sas_target_priv_data->sas_address = raid_device->wwid;
1895 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1896 if (ioc->is_warpdrive)
1897 sas_target_priv_data->raid_device = raid_device;
1898 raid_device->starget = starget;
1900 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* NVMe/PCIe devices. */
1905 if (starget->channel == PCIE_CHANNEL) {
1906 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1907 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1910 sas_target_priv_data->handle = pcie_device->handle;
1911 sas_target_priv_data->sas_address = pcie_device->wwid;
1912 sas_target_priv_data->port = NULL;
/* Reference from the lookup is kept in pcie_dev until target_destroy. */
1913 sas_target_priv_data->pcie_dev = pcie_device;
1914 pcie_device->starget = starget;
1915 pcie_device->id = starget->id;
1916 pcie_device->channel = starget->channel;
1917 sas_target_priv_data->flags |=
1918 MPT_TARGET_FLAGS_PCIE_DEVICE;
1919 if (pcie_device->fast_path)
1920 sas_target_priv_data->flags |=
1921 MPT_TARGET_FASTPATH_IO;
1923 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1927 /* sas/sata devices */
1928 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1929 rphy = dev_to_rphy(starget->dev.parent);
1930 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1933 sas_target_priv_data->handle = sas_device->handle;
1934 sas_target_priv_data->sas_address = sas_device->sas_address;
1935 sas_target_priv_data->port = sas_device->port;
/* Reference from the lookup is kept in sas_dev until target_destroy. */
1936 sas_target_priv_data->sas_dev = sas_device;
1937 sas_device->starget = starget;
1938 sas_device->id = starget->id;
1939 sas_device->channel = starget->channel;
1940 if (test_bit(sas_device->handle, ioc->pd_handles))
1941 sas_target_priv_data->flags |=
1942 MPT_TARGET_FLAGS_RAID_COMPONENT;
1943 if (sas_device->fast_path)
1944 sas_target_priv_data->flags |=
1945 MPT_TARGET_FASTPATH_IO;
1947 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1953 * scsih_target_destroy - target destroy routine
1954 * @starget: scsi target struct
/*
 * SCSI midlayer target-destroy hook — undoes scsih_target_alloc():
 * clears the back-pointer on the matching raid/pcie/sas object and drops
 * both the lookup reference taken here and the reference stashed in
 * target_alloc (hence the double put), then frees the private data.
 * NOTE(review): excerpt is elided — the "if (pcie_device/sas_device)"
 * guards around the double puts are not visible; code left untouched.
 */
1957 scsih_target_destroy(struct scsi_target *starget)
1959 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1960 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1961 struct MPT3SAS_TARGET *sas_target_priv_data;
1962 struct _sas_device *sas_device;
1963 struct _raid_device *raid_device;
1964 struct _pcie_device *pcie_device;
1965 unsigned long flags;
1967 sas_target_priv_data = starget->hostdata;
1968 if (!sas_target_priv_data)
1971 if (starget->channel == RAID_CHANNEL) {
1972 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1973 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1976 raid_device->starget = NULL;
1977 raid_device->sdev = NULL;
1979 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1983 if (starget->channel == PCIE_CHANNEL) {
1984 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1985 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1986 sas_target_priv_data);
1987 if (pcie_device && (pcie_device->starget == starget) &&
1988 (pcie_device->id == starget->id) &&
1989 (pcie_device->channel == starget->channel))
1990 pcie_device->starget = NULL;
1994 * Corresponding get() is in _scsih_target_alloc()
/* First put: lookup ref; second put: the ref held since target_alloc. */
1996 sas_target_priv_data->pcie_dev = NULL;
1997 pcie_device_put(pcie_device);
1998 pcie_device_put(pcie_device);
2000 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2004 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2005 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2006 if (sas_device && (sas_device->starget == starget) &&
2007 (sas_device->id == starget->id) &&
2008 (sas_device->channel == starget->channel))
2009 sas_device->starget = NULL;
2013 * Corresponding get() is in _scsih_target_alloc()
/* First put: lookup ref; second put: the ref held since target_alloc. */
2015 sas_target_priv_data->sas_dev = NULL;
2016 sas_device_put(sas_device);
2018 sas_device_put(sas_device);
2020 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2023 kfree(sas_target_priv_data);
2024 starget->hostdata = NULL;
2028 * scsih_slave_alloc - device add routine
2029 * @sdev: scsi device struct
2031 * Return: 0 if ok. Any other return is assumed to be an error and
2032 * the device is ignored.
/*
 * SCSI midlayer slave-alloc hook.  Allocates per-LUN private data, bumps
 * the target's LUN count, hides RAID-component devices from upper-layer
 * drivers (no_uld_attach), and back-fills the starget pointer on the
 * matching raid/pcie/sas object if it was not set yet.
 * NOTE(review): excerpt is elided — the allocation-failure return, the
 * "if (raid_device)" guard and the final "return 0" are not visible;
 * code left untouched.
 */
2035 scsih_slave_alloc(struct scsi_device *sdev)
2037 struct Scsi_Host *shost;
2038 struct MPT3SAS_ADAPTER *ioc;
2039 struct MPT3SAS_TARGET *sas_target_priv_data;
2040 struct MPT3SAS_DEVICE *sas_device_priv_data;
2041 struct scsi_target *starget;
2042 struct _raid_device *raid_device;
2043 struct _sas_device *sas_device;
2044 struct _pcie_device *pcie_device;
2045 unsigned long flags;
2047 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2049 if (!sas_device_priv_data)
2052 sas_device_priv_data->lun = sdev->lun;
2053 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2055 starget = scsi_target(sdev);
2056 sas_target_priv_data = starget->hostdata;
2057 sas_target_priv_data->num_luns++;
2058 sas_device_priv_data->sas_target = sas_target_priv_data;
2059 sdev->hostdata = sas_device_priv_data;
/* RAID components are claimed by the volume, not by sd/upper drivers. */
2060 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2061 sdev->no_uld_attach = 1;
2063 shost = dev_to_shost(&starget->dev);
2064 ioc = shost_priv(shost);
2065 if (starget->channel == RAID_CHANNEL) {
2066 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2067 raid_device = _scsih_raid_device_find_by_id(ioc,
2068 starget->id, starget->channel);
2070 raid_device->sdev = sdev; /* raid is single lun */
2071 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2073 if (starget->channel == PCIE_CHANNEL) {
2074 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2075 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2076 sas_target_priv_data->sas_address);
2077 if (pcie_device && (pcie_device->starget == NULL)) {
2078 sdev_printk(KERN_INFO, sdev,
2079 "%s : pcie_device->starget set to starget @ %d\n",
2080 __func__, __LINE__);
2081 pcie_device->starget = starget;
/* Drop the reference the lookup took. */
2085 pcie_device_put(pcie_device);
2086 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2088 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2089 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2090 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2091 sas_target_priv_data->sas_address,
2092 sas_target_priv_data->port);
2093 if (sas_device && (sas_device->starget == NULL)) {
2094 sdev_printk(KERN_INFO, sdev,
2095 "%s : sas_device->starget set to starget @ %d\n",
2096 __func__, __LINE__);
2097 sas_device->starget = starget;
/* Drop the reference the lookup took. */
2101 sas_device_put(sas_device);
2103 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2110 * scsih_slave_destroy - device destroy routine
2111 * @sdev: scsi device struct
/*
 * SCSI midlayer slave-destroy hook — mirror of scsih_slave_alloc():
 * decrements the target's LUN count and, when the last LUN goes away,
 * clears the starget back-pointer on the matching pcie/sas object.
 * NOTE(review): excerpt is elided — the "if (pcie_device/sas_device)"
 * guards around the puts are not visible; code left untouched.
 */
2114 scsih_slave_destroy(struct scsi_device *sdev)
2116 struct MPT3SAS_TARGET *sas_target_priv_data;
2117 struct scsi_target *starget;
2118 struct Scsi_Host *shost;
2119 struct MPT3SAS_ADAPTER *ioc;
2120 struct _sas_device *sas_device;
2121 struct _pcie_device *pcie_device;
2122 unsigned long flags;
2124 if (!sdev->hostdata)
2127 starget = scsi_target(sdev);
2128 sas_target_priv_data = starget->hostdata;
2129 sas_target_priv_data->num_luns--;
2131 shost = dev_to_shost(&starget->dev);
2132 ioc = shost_priv(shost);
2134 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2135 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2136 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2137 sas_target_priv_data);
2138 if (pcie_device && !sas_target_priv_data->num_luns)
2139 pcie_device->starget = NULL;
/* Drop the reference the lookup took. */
2142 pcie_device_put(pcie_device);
2144 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2146 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2147 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2148 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2149 sas_target_priv_data);
2150 if (sas_device && !sas_target_priv_data->num_luns)
2151 sas_device->starget = NULL;
/* Drop the reference the lookup took. */
2154 sas_device_put(sas_device);
2155 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2158 kfree(sdev->hostdata);
2159 sdev->hostdata = NULL;
2163 * _scsih_display_sata_capabilities - sata capabilities
2164 * @ioc: per adapter object
2165 * @handle: device handle
2166 * @sdev: scsi device struct
/*
 * Fetches SAS Device Page 0 for @handle from firmware and prints the SATA
 * feature bits (ATAPI/NCQ/async-notify/SMART/FUA/software-preserve) for
 * the given sdev.  Purely informational; errors just log and return.
 * NOTE(review): excerpt is elided — local declarations (ioc_status, flags,
 * device_info) and early returns are not visible; code left untouched.
 */
2169 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2170 u16 handle, struct scsi_device *sdev)
2172 Mpi2ConfigReply_t mpi_reply;
2173 Mpi2SasDevicePage0_t sas_device_pg0;
2178 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2179 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2180 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2181 __FILE__, __LINE__, __func__);
2185 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2186 MPI2_IOCSTATUS_MASK;
2187 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2188 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2189 __FILE__, __LINE__, __func__);
2193 flags = le16_to_cpu(sas_device_pg0.Flags);
2194 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2196 sdev_printk(KERN_INFO, sdev,
2197 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2198 "sw_preserve(%s)\n",
2199 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2200 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2203 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2209 * raid transport support -
2210 * Enabled for SLES11 and newer, in older kernels the driver will panic when
2211 * unloading the driver followed by a load - I believe that the subroutine
2212 * raid_class_release() is not cleaning up properly.
2216 * scsih_is_raid - return boolean indicating device is raid volume
2217 * @dev: the device struct object
/*
 * raid_function_template hook: a device is a RAID volume iff it sits on
 * RAID_CHANNEL.  Warpdrive controllers opt out (elided branch after the
 * is_warpdrive check — presumably "return 0"; TODO confirm).
 */
2220 scsih_is_raid(struct device *dev)
2222 struct scsi_device *sdev = to_scsi_device(dev);
2223 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2225 if (ioc->is_warpdrive)
2227 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
/* Returns 1 when the device sits on the PCIe (NVMe) channel, else 0. */
2231 scsih_is_nvme(struct device *dev)
2233 struct scsi_device *sdev = to_scsi_device(dev);
2235 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2239 * scsih_get_resync - get raid volume resync percent complete
2240 * @dev: the device struct object
/*
 * raid_function_template hook.  Reads the cached percent_complete under
 * raid_device_lock, then consults RAID Volume Page 0 from firmware: if
 * RESYNC_IN_PROGRESS is not set the percentage is forced to 0.  The value
 * is reported via raid_set_resync() against the template matching the
 * controller generation (MPI2 vs MPI2.5+).
 * NOTE(review): excerpt is elided — the "if (raid_device)" guard, goto
 * label and handle declaration are not visible; code left untouched.
 * NOTE(review): "static struct _raid_device *raid_device" makes this local
 * pointer static for no visible reason — harmless but worth confirming
 * upstream (scsih_get_state has the same oddity).
 */
2243 scsih_get_resync(struct device *dev)
2245 struct scsi_device *sdev = to_scsi_device(dev);
2246 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2247 static struct _raid_device *raid_device;
2248 unsigned long flags;
2249 Mpi2RaidVolPage0_t vol_pg0;
2250 Mpi2ConfigReply_t mpi_reply;
2251 u32 volume_status_flags;
2252 u8 percent_complete;
2255 percent_complete = 0;
2257 if (ioc->is_warpdrive)
2260 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2261 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2264 handle = raid_device->handle;
2265 percent_complete = raid_device->percent_complete;
2267 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2272 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2273 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2274 sizeof(Mpi2RaidVolPage0_t))) {
2275 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2276 __FILE__, __LINE__, __func__);
2277 percent_complete = 0;
2281 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2282 if (!(volume_status_flags &
2283 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2284 percent_complete = 0;
/* Report through the raid template for this HBA generation. */
2288 switch (ioc->hba_mpi_version_belonged) {
2290 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2294 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2300 * scsih_get_state - get raid volume level
2301 * @dev: the device struct object
/*
 * raid_function_template hook.  Fetches RAID Volume Page 0 and maps
 * VolumeStatusFlags/VolumeState onto the generic enum raid_state:
 * resyncing, active (optimal/online), degraded, or offline
 * (failed/missing); default stays RAID_STATE_UNKNOWN.
 * NOTE(review): excerpt is elided — "if (raid_device)" guard, goto label,
 * break statements and handle/volstate declarations are not visible;
 * code left untouched.
 */
2304 scsih_get_state(struct device *dev)
2306 struct scsi_device *sdev = to_scsi_device(dev);
2307 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2308 static struct _raid_device *raid_device;
2309 unsigned long flags;
2310 Mpi2RaidVolPage0_t vol_pg0;
2311 Mpi2ConfigReply_t mpi_reply;
2313 enum raid_state state = RAID_STATE_UNKNOWN;
2316 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2317 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2320 handle = raid_device->handle;
2321 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2326 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2327 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2328 sizeof(Mpi2RaidVolPage0_t))) {
2329 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2330 __FILE__, __LINE__, __func__);
2334 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2335 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2336 state = RAID_STATE_RESYNCING;
2340 switch (vol_pg0.VolumeState) {
2341 case MPI2_RAID_VOL_STATE_OPTIMAL:
2342 case MPI2_RAID_VOL_STATE_ONLINE:
2343 state = RAID_STATE_ACTIVE;
2345 case MPI2_RAID_VOL_STATE_DEGRADED:
2346 state = RAID_STATE_DEGRADED;
2348 case MPI2_RAID_VOL_STATE_FAILED:
2349 case MPI2_RAID_VOL_STATE_MISSING:
2350 state = RAID_STATE_OFFLINE;
/* Report through the raid template for this HBA generation. */
2354 switch (ioc->hba_mpi_version_belonged) {
2356 raid_set_state(mpt2sas_raid_template, dev, state);
2360 raid_set_state(mpt3sas_raid_template, dev, state);
2366 * _scsih_set_level - set raid level
2368 * @sdev: scsi device struct
2369 * @volume_type: volume type
/*
 * Maps MPI2 volume types (RAID0/10/1E/1) onto the generic enum raid_level
 * and publishes it via raid_set_level() against the template matching the
 * controller generation.  Unrecognized types stay RAID_LEVEL_UNKNOWN.
 * NOTE(review): excerpt is elided — break statements are not visible.
 */
2372 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2373 struct scsi_device *sdev, u8 volume_type)
2375 enum raid_level level = RAID_LEVEL_UNKNOWN;
2377 switch (volume_type) {
2378 case MPI2_RAID_VOL_TYPE_RAID0:
2379 level = RAID_LEVEL_0;
2381 case MPI2_RAID_VOL_TYPE_RAID10:
2382 level = RAID_LEVEL_10;
2384 case MPI2_RAID_VOL_TYPE_RAID1E:
2385 level = RAID_LEVEL_1E;
2387 case MPI2_RAID_VOL_TYPE_RAID1:
2388 level = RAID_LEVEL_1;
2392 switch (ioc->hba_mpi_version_belonged) {
2394 raid_set_level(mpt2sas_raid_template,
2395 &sdev->sdev_gendev, level);
2399 raid_set_level(mpt3sas_raid_template,
2400 &sdev->sdev_gendev, level);
2407 * _scsih_get_volume_capabilities - volume capabilities
2408 * @ioc: per adapter object
2409 * @raid_device: the raid_device object
2411 * Return: 0 for success, else 1
/*
 * Reads the physical-disk count, allocates and fetches RAID Volume Page 0
 * (sized via offsetof + per-disk entries — a flexible-array-style sizing),
 * records the volume type, and derives device_info from the first member
 * disk via PhysDisk Page 0 -> SAS Device Page 0.
 * NOTE(review): excerpt is elided — dfailprintk()-style wrappers around
 * ioc_warn() (trailing "))" ), the kfree(vol_pg0) and the return paths are
 * not visible; code left untouched.
 */
2414 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2415 struct _raid_device *raid_device)
2417 Mpi2RaidVolPage0_t *vol_pg0;
2418 Mpi2RaidPhysDiskPage0_t pd_pg0;
2419 Mpi2SasDevicePage0_t sas_device_pg0;
2420 Mpi2ConfigReply_t mpi_reply;
2424 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2425 &num_pds)) || !num_pds) {
2427 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2428 __FILE__, __LINE__, __func__));
2432 raid_device->num_pds = num_pds;
/* Page size = fixed header + one PhysDisk entry per member disk. */
2433 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2434 sizeof(Mpi2RaidVol0PhysDisk_t));
2435 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2438 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2439 __FILE__, __LINE__, __func__));
2443 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2444 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2446 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2447 __FILE__, __LINE__, __func__));
2452 raid_device->volume_type = vol_pg0->VolumeType;
2454 /* figure out what the underlying devices are by
2455 * obtaining the device_info bits for the 1st device
2457 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2458 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2459 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2460 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2461 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2462 le16_to_cpu(pd_pg0.DevHandle)))) {
2463 raid_device->device_info =
2464 le32_to_cpu(sas_device_pg0.DeviceInfo);
2473 * _scsih_enable_tlr - setting TLR flags
2474 * @ioc: per adapter object
2475 * @sdev: scsi device struct
2477 * Enabling Transaction Layer Retries for tape devices when
2478 * vpd page 0x90 is present
2482 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2486 if (sdev->type != TYPE_TAPE)
2489 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2492 sas_enable_tlr(sdev);
2493 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2494 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
/*
 * NOTE(review): this region is a garbled extraction of the original file --
 * the integer prefix on each line is the original source's line number, and
 * many intervening lines (braces, else/break statements, local declarations
 * such as qdepth/ssp_target/ds/r_level) were lost during extraction.  The
 * comments added below annotate only what the visible code shows; do not
 * treat this text as compilable as-is.
 */
2500 * scsih_slave_configure - device configure routine.
2501 * @sdev: scsi device struct
2503 * Return: 0 if ok. Any other return is assumed to be an error and
2504 * the device is ignored.
2507 scsih_slave_configure(struct scsi_device *sdev)
2509 struct Scsi_Host *shost = sdev->host;
2510 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2511 struct MPT3SAS_DEVICE *sas_device_priv_data;
2512 struct MPT3SAS_TARGET *sas_target_priv_data;
2513 struct _sas_device *sas_device;
2514 struct _pcie_device *pcie_device;
2515 struct _raid_device *raid_device;
2516 unsigned long flags;
2521 u16 handle, volume_handle = 0;
2522 u64 volume_wwid = 0;
/* sdev->hostdata was allocated at slave_alloc time; mark LUN configured. */
2525 sas_device_priv_data = sdev->hostdata;
2526 sas_device_priv_data->configured_lun = 1;
2527 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2528 sas_target_priv_data = sas_device_priv_data->sas_target;
2529 handle = sas_target_priv_data->handle;
2531 /* raid volume handling */
2532 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
/* Look up the volume by firmware handle under raid_device_lock. */
2534 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2535 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2536 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2539 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2540 __FILE__, __LINE__, __func__));
2544 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2546 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2547 __FILE__, __LINE__, __func__));
2552 * WARPDRIVE: Initialize the required data for Direct IO
2554 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2556 /* RAID Queue Depth Support
2557 * IS volume = underlying qdepth of drive type, either
2558 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2559 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2561 if (raid_device->device_info &
2562 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2563 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2566 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2567 if (raid_device->device_info &
2568 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
/*
 * Per-volume-type queue depth; R10 display string additionally depends
 * on OEM manufacturing page 10 flags and an even pd count.
 */
2574 switch (raid_device->volume_type) {
2575 case MPI2_RAID_VOL_TYPE_RAID0:
2578 case MPI2_RAID_VOL_TYPE_RAID1E:
2579 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2580 if (ioc->manu_pg10.OEMIdentifier &&
2581 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2582 MFG10_GF0_R10_DISPLAY) &&
2583 !(raid_device->num_pds % 2))
2588 case MPI2_RAID_VOL_TYPE_RAID1:
2589 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2592 case MPI2_RAID_VOL_TYPE_RAID10:
2593 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2596 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2598 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2603 if (!ioc->hide_ir_msg)
2604 sdev_printk(KERN_INFO, sdev,
2605 "%s: handle(0x%04x), wwid(0x%016llx),"
2606 " pd_count(%d), type(%s)\n",
2607 r_level, raid_device->handle,
2608 (unsigned long long)raid_device->wwid,
2609 raid_device->num_pds, ds);
/* Clamp the request queue's max transfer size for RAID volumes. */
2611 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2612 blk_queue_max_hw_sectors(sdev->request_queue,
2613 MPT3SAS_RAID_MAX_SECTORS);
2614 sdev_printk(KERN_INFO, sdev,
2615 "Set queue's max_sector to: %u\n",
2616 MPT3SAS_RAID_MAX_SECTORS);
2619 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2621 /* raid transport support */
2622 if (!ioc->is_warpdrive)
2623 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2627 /* non-raid handling */
2628 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2629 if (mpt3sas_config_get_volume_handle(ioc, handle,
2632 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2633 __FILE__, __LINE__, __func__));
2636 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2637 volume_handle, &volume_wwid)) {
2639 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2640 __FILE__, __LINE__, __func__));
/* NVMe/PCIe attached device: fixed NVMe queue depth, MDTS-based limits. */
2646 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2647 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2648 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2649 sas_device_priv_data->sas_target->sas_address);
2651 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2653 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2654 __FILE__, __LINE__, __func__));
2658 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2660 sdev_printk(KERN_INFO, sdev,
2661 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2662 ds, handle, (unsigned long long)pcie_device->wwid,
2663 pcie_device->port_num);
2664 if (pcie_device->enclosure_handle != 0)
2665 sdev_printk(KERN_INFO, sdev,
2666 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2668 (unsigned long long)pcie_device->enclosure_logical_id,
2670 if (pcie_device->connector_name[0] != '\0')
2671 sdev_printk(KERN_INFO, sdev,
2672 "%s: enclosure level(0x%04x),"
2673 "connector name( %s)\n", ds,
2674 pcie_device->enclosure_level,
2675 pcie_device->connector_name);
/* MDTS is in bytes; convert to 512-byte sectors for the block layer. */
2677 if (pcie_device->nvme_mdts)
2678 blk_queue_max_hw_sectors(sdev->request_queue,
2679 pcie_device->nvme_mdts/512);
2681 pcie_device_put(pcie_device);
2682 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2683 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2684 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2685 ** merged and can eliminate holes created during merging
2688 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2689 sdev->request_queue);
2690 blk_queue_virt_boundary(sdev->request_queue,
2691 ioc->page_size - 1);
/* Plain SAS/SATA end device: pick queue depth by transport type. */
2695 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2696 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2697 sas_device_priv_data->sas_target->sas_address,
2698 sas_device_priv_data->sas_target->port);
2700 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2702 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2703 __FILE__, __LINE__, __func__));
2707 sas_device->volume_handle = volume_handle;
2708 sas_device->volume_wwid = volume_wwid;
2709 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2710 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
/* SEP (enclosure services) devices must not be delay-removed. */
2712 if (sas_device->device_info &
2713 MPI2_SAS_DEVICE_INFO_SEP) {
2714 sdev_printk(KERN_WARNING, sdev,
2715 "set ignore_delay_remove for handle(0x%04x)\n",
2716 sas_device_priv_data->sas_target->handle);
2717 sas_device_priv_data->ignore_delay_remove = 1;
2722 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2723 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2725 else if (sas_device->device_info &
2726 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2730 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2731 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2732 ds, handle, (unsigned long long)sas_device->sas_address,
2733 sas_device->phy, (unsigned long long)sas_device->device_name);
2735 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2737 sas_device_put(sas_device);
2738 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2741 _scsih_display_sata_capabilities(ioc, handle, sdev);
2744 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
/* SSP targets additionally get port-mode-page read + TLR enablement. */
2747 sas_read_port_mode_page(sdev);
2748 _scsih_enable_tlr(ioc, sdev);
2755 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2756 * @sdev: scsi device struct
2757 * @bdev: pointer to block device context
2758 * @capacity: device size (in 512 byte sectors)
2759 * @params: three element array to place output:
2760 * params[0] number of heads (max 255)
2761 * params[1] number of sectors (max 63)
2762 * params[2] number of cylinders
2765 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2766 sector_t capacity, int params[])
2776 dummy = heads * sectors;
2777 cylinders = capacity;
2778 sector_div(cylinders, dummy);
2781 * Handle extended translation size for logical drives
2784 if ((ulong)capacity >= 0x200000) {
2787 dummy = heads * sectors;
2788 cylinders = capacity;
2789 sector_div(cylinders, dummy);
2794 params[1] = sectors;
2795 params[2] = cylinders;
2801 * _scsih_response_code - translation of device response code
2802 * @ioc: per adapter object
2803 * @response_code: response code returned by the device
2806 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2810 switch (response_code) {
2811 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2812 desc = "task management request completed";
2814 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2815 desc = "invalid frame";
2817 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2818 desc = "task management request not supported";
2820 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2821 desc = "task management request failed";
2823 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2824 desc = "task management request succeeded";
2826 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2827 desc = "invalid lun";
2830 desc = "overlapped tag attempted";
2832 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2833 desc = "task queued, however not sent to target";
2839 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2843 * _scsih_tm_done - tm completion routine
2844 * @ioc: per adapter object
2845 * @smid: system request message index
2846 * @msix_index: MSIX table index supplied by the OS
2847 * @reply: reply message frame(lower 32bit addr)
2850 * The callback handler when using scsih_issue_tm.
2852 * Return: 1 meaning mf should be freed from _base_interrupt
2853 * 0 means the mf is freed from this function.
2856 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2858 MPI2DefaultReply_t *mpi_reply;
2860 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2862 if (ioc->tm_cmds.smid != smid)
2864 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2865 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2867 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2868 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2870 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2871 complete(&ioc->tm_cmds.done);
2876 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2877 * @ioc: per adapter object
2878 * @handle: device handle
2880 * During taskmangement request, we need to freeze the device queue.
2883 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2885 struct MPT3SAS_DEVICE *sas_device_priv_data;
2886 struct scsi_device *sdev;
2889 shost_for_each_device(sdev, ioc->shost) {
2892 sas_device_priv_data = sdev->hostdata;
2893 if (!sas_device_priv_data)
2895 if (sas_device_priv_data->sas_target->handle == handle) {
2896 sas_device_priv_data->sas_target->tm_busy = 1;
2898 ioc->ignore_loginfos = 1;
2904 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2905 * @ioc: per adapter object
2906 * @handle: device handle
2908 * During taskmangement request, we need to freeze the device queue.
2911 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2913 struct MPT3SAS_DEVICE *sas_device_priv_data;
2914 struct scsi_device *sdev;
2917 shost_for_each_device(sdev, ioc->shost) {
2920 sas_device_priv_data = sdev->hostdata;
2921 if (!sas_device_priv_data)
2923 if (sas_device_priv_data->sas_target->handle == handle) {
2924 sas_device_priv_data->sas_target->tm_busy = 0;
2926 ioc->ignore_loginfos = 0;
2932 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2933 * @ioc: per adapter object
2934 * @channel: the channel assigned by the OS
2935 * @id: the id assigned by the OS
2937 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2938 * @smid_task: smid assigned to the task
2940 * Look whether TM has aborted the timed out SCSI command, if
2941 * TM has aborted the IO then return SUCCESS else return FAILED.
2944 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2945 uint id, uint lun, u8 type, u16 smid_task)
2948 if (smid_task <= ioc->shost->can_queue) {
2950 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2951 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2955 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2956 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2957 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2964 } else if (smid_task == ioc->scsih_cmds.smid) {
2965 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2966 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2968 } else if (smid_task == ioc->ctl_cmds.smid) {
2969 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2970 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2978 * scsih_tm_post_processing - post processing of target & LUN reset
2979 * @ioc: per adapter object
2980 * @handle: device handle
2981 * @channel: the channel assigned by the OS
2982 * @id: the id assigned by the OS
2984 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2985 * @smid_task: smid assigned to the task
2987 * Post processing of target & LUN reset. Due to interrupt latency
2988 * issue it possible that interrupt for aborted IO might not be
2989 * received yet. So before returning failure status, poll the
2990 * reply descriptor pools for the reply of timed out SCSI command.
2991 * Return FAILED status if reply for timed out is not received
2992 * otherwise return SUCCESS.
2995 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2996 uint channel, uint id, uint lun, u8 type, u16 smid_task)
3000 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3005 "Poll ReplyDescriptor queues for completion of"
3006 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3007 smid_task, type, handle);
3010 * Due to interrupt latency issues, driver may receive interrupt for
3011 * TM first and then for aborted SCSI IO command. So, poll all the
3012 * ReplyDescriptor pools before returning the FAILED status to SML.
3014 mpt3sas_base_mask_interrupts(ioc);
3015 mpt3sas_base_sync_reply_irqs(ioc, 1);
3016 mpt3sas_base_unmask_interrupts(ioc);
3018 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
/*
 * NOTE(review): garbled extraction -- the integer prefix on each line is the
 * original file's line number; several lines (local declarations of rc/smid/
 * ioc_state/issue_reset, the `switch (type)` opener, break statements, and
 * the out: label) were lost.  Comments below annotate only the visible code.
 */
3022 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3023 * @ioc: per adapter struct
3024 * @handle: device handle
3025 * @channel: the channel assigned by the OS
3026 * @id: the id assigned by the OS
3028 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3029 * @smid_task: smid assigned to the task
3030 * @msix_task: MSIX table index supplied by the OS
3031 * @timeout: timeout in seconds
3032 * @tr_method: Target Reset Method
3035 * A generic API for sending task management requests to firmware.
3037 * The callback index is set inside `ioc->tm_cb_idx`.
3038 * The caller is responsible to check for outstanding commands.
3040 * Return: SUCCESS or FAILED.
3043 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3044 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3045 u8 timeout, u8 tr_method)
3047 Mpi2SCSITaskManagementRequest_t *mpi_request;
3048 Mpi2SCSITaskManagementReply_t *mpi_reply;
3049 Mpi25SCSIIORequest_t *request;
/* Caller (issue_locked_tm) must already hold the TM mutex. */
3055 lockdep_assert_held(&ioc->tm_cmds.mutex);
3057 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3058 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3062 if (ioc->shost_recovery || ioc->remove_host ||
3063 ioc->pci_error_recovery) {
3064 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
/*
 * Sanity-check the IOC state: doorbell-in-use, FAULT, or COREDUMP states
 * force a hard reset instead of sending the TM; the reset result is
 * mapped to SUCCESS/FAILED.
 */
3068 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3069 if (ioc_state & MPI2_DOORBELL_USED) {
3070 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3071 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3072 return (!rc) ? SUCCESS : FAILED;
3075 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3076 mpt3sas_print_fault_code(ioc, ioc_state &
3077 MPI2_DOORBELL_DATA_MASK);
3078 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3079 return (!rc) ? SUCCESS : FAILED;
3080 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3081 MPI2_IOC_STATE_COREDUMP) {
3082 mpt3sas_print_coredump_info(ioc, ioc_state &
3083 MPI2_DOORBELL_DATA_MASK);
3084 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3085 return (!rc) ? SUCCESS : FAILED;
/* TM requests go through the high-priority request queue. */
3088 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3090 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3095 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3096 handle, type, smid_task, timeout, tr_method));
3097 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3098 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3099 ioc->tm_cmds.smid = smid;
3100 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3101 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3102 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3103 mpi_request->DevHandle = cpu_to_le16(handle);
3104 mpi_request->TaskType = type;
/* tr_method is only meaningful for abort/query task types. */
3105 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3106 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3107 mpi_request->MsgFlags = tr_method;
3108 mpi_request->TaskMID = cpu_to_le16(smid_task);
3109 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
/* Freeze the device queue (tm_busy) for the duration of the TM. */
3110 mpt3sas_scsih_set_tm_flag(ioc, handle);
3111 init_completion(&ioc->tm_cmds.done);
3112 ioc->put_smid_hi_priority(ioc, smid, msix_task);
3113 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
3114 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3115 mpt3sas_check_cmd_timeout(ioc,
3116 ioc->tm_cmds.status, mpi_request,
3117 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3119 rc = mpt3sas_base_hard_reset_handler(ioc,
3121 rc = (!rc) ? SUCCESS : FAILED;
3126 /* sync IRQs in case those were busy during flush. */
3127 mpt3sas_base_sync_reply_irqs(ioc, 0);
3129 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3130 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3131 mpi_reply = ioc->tm_cmds.reply;
3133 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3134 le16_to_cpu(mpi_reply->IOCStatus),
3135 le32_to_cpu(mpi_reply->IOCLogInfo),
3136 le32_to_cpu(mpi_reply->TerminationCount)));
3137 if (ioc->logging_level & MPT_DEBUG_TM) {
3138 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3139 if (mpi_reply->IOCStatus)
3140 _debug_dump_mf(mpi_request,
3141 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
/* Post-process the result by TM type. */
3146 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3149 * If DevHandle filed in smid_task's entry of request pool
3150 * doesn't match with device handle on which this task abort
3151 * TM is received then it means that TM has successfully
3152 * aborted the timed out command. Since smid_task's entry in
3153 * request pool will be memset to zero once the timed out
3154 * command is returned to the SML. If the command is not
3155 * aborted then smid_task’s entry won’t be cleared and it
3156 * will have same DevHandle value on which this task abort TM
3157 * is received and driver will return the TM status as FAILED.
3159 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3160 if (le16_to_cpu(request->DevHandle) != handle)
3163 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3164 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3165 handle, timeout, tr_method, smid_task, msix_task);
3169 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3170 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3171 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3172 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3175 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
/* Unfreeze the device queue and release the TM slot. */
3184 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3185 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3189 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3190 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3191 u16 msix_task, u8 timeout, u8 tr_method)
3195 mutex_lock(&ioc->tm_cmds.mutex);
3196 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3197 smid_task, msix_task, timeout, tr_method);
3198 mutex_unlock(&ioc->tm_cmds.mutex);
/*
 * NOTE(review): garbled extraction -- leading integers are original line
 * numbers; several else branches, closing braces, and argument lines
 * (e.g. sas_device->handle / ->phy in the final printk) were lost.
 * Comments below annotate only the visible code.
 */
3204 * _scsih_tm_display_info - displays info about the device
3205 * @ioc: per adapter struct
3206 * @scmd: pointer to scsi command object
3208 * Called by task management callback handlers.
3211 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3213 struct scsi_target *starget = scmd->device->sdev_target;
3214 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3215 struct _sas_device *sas_device = NULL;
3216 struct _pcie_device *pcie_device = NULL;
3217 unsigned long flags;
3218 char *device_str = NULL;
/* WarpDrive controllers hide IR messages; label volumes accordingly. */
3222 if (ioc->hide_ir_msg)
3223 device_str = "WarpDrive";
3225 device_str = "volume";
3227 scsi_print_command(scmd);
/* Volume target: print handle/wwid from the target private data. */
3228 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3229 starget_printk(KERN_INFO, starget,
3230 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3231 device_str, priv_target->handle,
3232 device_str, (unsigned long long)priv_target->sas_address);
/* NVMe/PCIe target: look up pcie_device under pcie_device_lock. */
3234 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3235 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3236 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3238 starget_printk(KERN_INFO, starget,
3239 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3240 pcie_device->handle,
3241 (unsigned long long)pcie_device->wwid,
3242 pcie_device->port_num);
3243 if (pcie_device->enclosure_handle != 0)
3244 starget_printk(KERN_INFO, starget,
3245 "enclosure logical id(0x%016llx), slot(%d)\n",
3246 (unsigned long long)
3247 pcie_device->enclosure_logical_id,
3249 if (pcie_device->connector_name[0] != '\0')
3250 starget_printk(KERN_INFO, starget,
3251 "enclosure level(0x%04x), connector name( %s)\n",
3252 pcie_device->enclosure_level,
3253 pcie_device->connector_name);
3254 pcie_device_put(pcie_device);
3256 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* SAS/SATA target: look up sas_device under sas_device_lock. */
3259 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3260 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3262 if (priv_target->flags &
3263 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3264 starget_printk(KERN_INFO, starget,
3265 "volume handle(0x%04x), "
3266 "volume wwid(0x%016llx)\n",
3267 sas_device->volume_handle,
3268 (unsigned long long)sas_device->volume_wwid);
3270 starget_printk(KERN_INFO, starget,
3271 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3273 (unsigned long long)sas_device->sas_address,
3276 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3279 sas_device_put(sas_device);
3281 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3286 * scsih_abort - eh threads main abort routine
3287 * @scmd: pointer to scsi command object
3289 * Return: SUCCESS if command aborted else FAILED
3292 scsih_abort(struct scsi_cmnd *scmd)
3294 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3295 struct MPT3SAS_DEVICE *sas_device_priv_data;
3296 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3301 struct _pcie_device *pcie_device = NULL;
3302 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3303 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3304 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3305 (scmd->request->timeout / HZ) * 1000);
3306 _scsih_tm_display_info(ioc, scmd);
3308 sas_device_priv_data = scmd->device->hostdata;
3309 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3311 sdev_printk(KERN_INFO, scmd->device,
3312 "device been deleted! scmd(0x%p)\n", scmd);
3313 scmd->result = DID_NO_CONNECT << 16;
3314 scmd->scsi_done(scmd);
3319 /* check for completed command */
3320 if (st == NULL || st->cb_idx == 0xFF) {
3321 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3322 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3323 scmd->result = DID_RESET << 16;
3328 /* for hidden raid components and volumes this is not supported */
3329 if (sas_device_priv_data->sas_target->flags &
3330 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3331 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3332 scmd->result = DID_RESET << 16;
3337 mpt3sas_halt_firmware(ioc);
3339 handle = sas_device_priv_data->sas_target->handle;
3340 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3341 if (pcie_device && (!ioc->tm_custom_handling) &&
3342 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3343 timeout = ioc->nvme_abort_timeout;
3344 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3345 scmd->device->id, scmd->device->lun,
3346 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3347 st->smid, st->msix_io, timeout, 0);
3348 /* Command must be cleared after abort */
3349 if (r == SUCCESS && st->cb_idx != 0xFF)
3352 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3353 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3355 pcie_device_put(pcie_device);
3360 * scsih_dev_reset - eh threads main device reset routine
3361 * @scmd: pointer to scsi command object
3363 * Return: SUCCESS if command aborted else FAILED
3366 scsih_dev_reset(struct scsi_cmnd *scmd)
3368 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3369 struct MPT3SAS_DEVICE *sas_device_priv_data;
3370 struct _sas_device *sas_device = NULL;
3371 struct _pcie_device *pcie_device = NULL;
3377 struct scsi_target *starget = scmd->device->sdev_target;
3378 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3380 sdev_printk(KERN_INFO, scmd->device,
3381 "attempting device reset! scmd(0x%p)\n", scmd);
3382 _scsih_tm_display_info(ioc, scmd);
3384 sas_device_priv_data = scmd->device->hostdata;
3385 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3387 sdev_printk(KERN_INFO, scmd->device,
3388 "device been deleted! scmd(0x%p)\n", scmd);
3389 scmd->result = DID_NO_CONNECT << 16;
3390 scmd->scsi_done(scmd);
3395 /* for hidden raid components obtain the volume_handle */
3397 if (sas_device_priv_data->sas_target->flags &
3398 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3399 sas_device = mpt3sas_get_sdev_from_target(ioc,
3402 handle = sas_device->volume_handle;
3404 handle = sas_device_priv_data->sas_target->handle;
3407 scmd->result = DID_RESET << 16;
3412 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3414 if (pcie_device && (!ioc->tm_custom_handling) &&
3415 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3416 tr_timeout = pcie_device->reset_timeout;
3417 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3419 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3421 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3422 scmd->device->id, scmd->device->lun,
3423 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3424 tr_timeout, tr_method);
3425 /* Check for busy commands after reset */
3426 if (r == SUCCESS && scsi_device_busy(scmd->device))
3429 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3430 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3433 sas_device_put(sas_device);
3435 pcie_device_put(pcie_device);
3441 * scsih_target_reset - eh threads main target reset routine
3442 * @scmd: pointer to scsi command object
3444 * Return: SUCCESS if command aborted else FAILED
3447 scsih_target_reset(struct scsi_cmnd *scmd)
3449 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3450 struct MPT3SAS_DEVICE *sas_device_priv_data;
3451 struct _sas_device *sas_device = NULL;
3452 struct _pcie_device *pcie_device = NULL;
3457 struct scsi_target *starget = scmd->device->sdev_target;
3458 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3460 starget_printk(KERN_INFO, starget,
3461 "attempting target reset! scmd(0x%p)\n", scmd);
3462 _scsih_tm_display_info(ioc, scmd);
3464 sas_device_priv_data = scmd->device->hostdata;
3465 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3467 starget_printk(KERN_INFO, starget,
3468 "target been deleted! scmd(0x%p)\n", scmd);
3469 scmd->result = DID_NO_CONNECT << 16;
3470 scmd->scsi_done(scmd);
3475 /* for hidden raid components obtain the volume_handle */
3477 if (sas_device_priv_data->sas_target->flags &
3478 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3479 sas_device = mpt3sas_get_sdev_from_target(ioc,
3482 handle = sas_device->volume_handle;
3484 handle = sas_device_priv_data->sas_target->handle;
3487 scmd->result = DID_RESET << 16;
3492 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3494 if (pcie_device && (!ioc->tm_custom_handling) &&
3495 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3496 tr_timeout = pcie_device->reset_timeout;
3497 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3499 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3500 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3501 scmd->device->id, 0,
3502 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3503 tr_timeout, tr_method);
3504 /* Check for busy commands after reset */
3505 if (r == SUCCESS && atomic_read(&starget->target_busy))
3508 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3509 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3512 sas_device_put(sas_device);
3514 pcie_device_put(pcie_device);
3520 * scsih_host_reset - eh threads main host reset routine
3521 * @scmd: pointer to scsi command object
3523 * Return: SUCCESS if command aborted else FAILED
3526 scsih_host_reset(struct scsi_cmnd *scmd)
3528 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3531 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3532 scsi_print_command(scmd);
3534 if (ioc->is_driver_loading || ioc->remove_host) {
3535 ioc_info(ioc, "Blocking the host reset\n");
3540 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3541 r = (retval < 0) ? FAILED : SUCCESS;
3543 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3544 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3550 * _scsih_fw_event_add - insert and queue up fw_event
3551 * @ioc: per adapter object
3552 * @fw_event: object describing the event
3553 * Context: This function will acquire ioc->fw_event_lock.
3555 * This adds the firmware event object into link list, then queues it up to
3556 * be processed from user context.
3559 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3561 unsigned long flags;
3563 if (ioc->firmware_event_thread == NULL)
3566 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3567 fw_event_work_get(fw_event);
3568 INIT_LIST_HEAD(&fw_event->list);
3569 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3570 INIT_WORK(&fw_event->work, _firmware_event_work);
3571 fw_event_work_get(fw_event);
3572 queue_work(ioc->firmware_event_thread, &fw_event->work);
3573 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3577 * _scsih_fw_event_del_from_list - delete fw_event from the list
3578 * @ioc: per adapter object
3579 * @fw_event: object describing the event
3580 * Context: This function will acquire ioc->fw_event_lock.
3582 * If the fw_event is on the fw_event_list, remove it and do a put.
3585 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3588 unsigned long flags;
3590 spin_lock_irqsave(&ioc->fw_event_lock, flags);
/* list_empty() on the entry is reliable because removers use list_del_init(). */
3591 if (!list_empty(&fw_event->list)) {
3592 list_del_init(&fw_event->list);
/* Drop the reference the list held (taken in _scsih_fw_event_add()). */
3593 fw_event_work_put(fw_event);
3595 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3600 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3601 * @ioc: per adapter object
3602 * @event_data: trigger event data
3605 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3606 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3608 struct fw_event_work *fw_event;
/* Trigger processing is not done while the driver is still loading. */
3611 if (ioc->is_driver_loading)
3613 sz = sizeof(*event_data);
/* Allocate a fw_event with room for a private copy of the trigger data. */
3614 fw_event = alloc_fw_event_work(sz);
3617 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3618 fw_event->ioc = ioc;
3619 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3620 _scsih_fw_event_add(ioc, fw_event);
/* Drop the allocation reference; list/work references keep it alive. */
3621 fw_event_work_put(fw_event);
3625 * _scsih_error_recovery_delete_devices - remove devices not responding
3626 * @ioc: per adapter object
3629 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3631 struct fw_event_work *fw_event;
/* During initial load the device scan handles unresponsive devices. */
3633 if (ioc->is_driver_loading)
3635 fw_event = alloc_fw_event_work(0);
/* Driver-internal (not firmware) event to prune unresponding devices. */
3638 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3639 fw_event->ioc = ioc;
3640 _scsih_fw_event_add(ioc, fw_event);
/* Drop the allocation reference; queued references keep it alive. */
3641 fw_event_work_put(fw_event);
3645 * mpt3sas_port_enable_complete - port enable completed (fake event)
3646 * @ioc: per adapter object
3649 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3651 struct fw_event_work *fw_event;
3653 fw_event = alloc_fw_event_work(0);
/* Driver-internal event: notify user context that port enable finished. */
3656 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3657 fw_event->ioc = ioc;
3658 _scsih_fw_event_add(ioc, fw_event);
/* Drop the allocation reference; queued references keep it alive. */
3659 fw_event_work_put(fw_event);
/* Pop and return the oldest queued fw_event (NULL if the queue is empty).
 * Caller inherits the list's reference on the returned event. */
3662 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3664 unsigned long flags;
3665 struct fw_event_work *fw_event = NULL;
3667 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3668 if (!list_empty(&ioc->fw_event_list)) {
3669 fw_event = list_first_entry(&ioc->fw_event_list,
3670 struct fw_event_work, list);
/* list_del_init() so _scsih_fw_event_del_from_list() sees it removed. */
3671 list_del_init(&fw_event->list);
3673 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3679 * _scsih_fw_event_cleanup_queue - cleanup event queue
3680 * @ioc: per adapter object
3682 * Walk the firmware event queue, either killing timers, or waiting
3683 * for outstanding events to complete
3685 * Context: task, can sleep
3688 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3690 struct fw_event_work *fw_event;
/* Nothing queued and nothing running, or no worker thread: nothing to do. */
3692 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3693 !ioc->firmware_event_thread)
/* Signal in-flight event handlers that a cleanup is in progress. */
3696 ioc->fw_events_cleanup = 1;
/* Drain queued events first, then the currently-executing one (if any). */
3697 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3698 (fw_event = ioc->current_event)) {
3700 * Wait on the fw_event to complete. If this returns 1, then
3701 * the event was never executed, and we need a put for the
3702 * reference the work had on the fw_event.
3704 * If it did execute, we wait for it to finish, and the put will
3705 * happen from _firmware_event_work()
3707 if (cancel_work_sync(&fw_event->work))
3708 fw_event_work_put(fw_event);
/* Drop the reference this loop owns on the dequeued/current event. */
3710 fw_event_work_put(fw_event);
3712 ioc->fw_events_cleanup = 0;
3716 * _scsih_internal_device_block - block the sdev device
3717 * @sdev: per device object
3718 * @sas_device_priv_data : per device driver private data
3720 * make sure device is blocked without error, if not
3724 _scsih_internal_device_block(struct scsi_device *sdev,
3725 struct MPT3SAS_DEVICE *sas_device_priv_data)
3729 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3730 sas_device_priv_data->sas_target->handle);
/* Record the blocked state before asking the midlayer to block. */
3731 sas_device_priv_data->block = 1;
/* _nowait variant: callable from non-sleeping context. */
3733 r = scsi_internal_device_block_nowait(sdev);
3735 sdev_printk(KERN_WARNING, sdev,
3736 "device_block failed with return(%d) for handle(0x%04x)\n",
3737 r, sas_device_priv_data->sas_target->handle);
3741 * _scsih_internal_device_unblock - unblock the sdev device
3742 * @sdev: per device object
3743 * @sas_device_priv_data : per device driver private data
3744 * make sure device is unblocked without error, if not retry
3745 * by blocking and then unblocking
3749 _scsih_internal_device_unblock(struct scsi_device *sdev,
3750 struct MPT3SAS_DEVICE *sas_device_priv_data)
3754 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3755 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
/* Clear our blocked flag before attempting the midlayer transition. */
3756 sas_device_priv_data->block = 0;
3757 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3759 /* The device has been set to SDEV_RUNNING by SD layer during
3760 * device addition but the request queue is still stopped by
3761 * our earlier block call. We need to perform a block again
3762 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3764 sdev_printk(KERN_WARNING, sdev,
3765 "device_unblock failed with return(%d) for handle(0x%04x) "
3766 "performing a block followed by an unblock\n",
3767 r, sas_device_priv_data->sas_target->handle);
/* Recovery path: force SDEV_BLOCK first so the unblock transition
 * below is legal, keeping priv_data->block in step at each stage. */
3768 sas_device_priv_data->block = 1;
3769 r = scsi_internal_device_block_nowait(sdev);
3771 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3772 "failed with return(%d) for handle(0x%04x)\n",
3773 r, sas_device_priv_data->sas_target->handle);
3775 sas_device_priv_data->block = 0;
3776 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3778 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3779 " failed with return(%d) for handle(0x%04x)\n",
3780 r, sas_device_priv_data->sas_target->handle);
3785 * _scsih_ublock_io_all_device - unblock every device
3786 * @ioc: per adapter object
3788 * change the device state from block to running
3791 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3793 struct MPT3SAS_DEVICE *sas_device_priv_data;
3794 struct scsi_device *sdev;
3796 shost_for_each_device(sdev, ioc->shost) {
3797 sas_device_priv_data = sdev->hostdata;
/* Skip devices without private data or that were never blocked by us. */
3798 if (!sas_device_priv_data)
3800 if (!sas_device_priv_data->block)
3803 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3804 "device_running, handle(0x%04x)\n",
3805 sas_device_priv_data->sas_target->handle));
3806 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3812 * _scsih_ublock_io_device - prepare device to be deleted
3813 * @ioc: per adapter object
3814 * @sas_address: sas address
3815 * @port: hba port entry
3817 * unblock then put device in offline state
3820 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3821 u64 sas_address, struct hba_port *port)
3823 struct MPT3SAS_DEVICE *sas_device_priv_data;
3824 struct scsi_device *sdev;
3826 shost_for_each_device(sdev, ioc->shost) {
3827 sas_device_priv_data = sdev->hostdata;
3828 if (!sas_device_priv_data)
/* Match on both SAS address and HBA port: the same address can be
 * visible through different ports on multi-port topologies. */
3830 if (sas_device_priv_data->sas_target->sas_address
3833 if (sas_device_priv_data->sas_target->port != port)
3835 if (sas_device_priv_data->block)
3836 _scsih_internal_device_unblock(sdev,
3837 sas_device_priv_data);
3842 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3843 * @ioc: per adapter object
3845 * During device pull we need to appropriately set the sdev state.
3848 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3850 struct MPT3SAS_DEVICE *sas_device_priv_data;
3851 struct scsi_device *sdev;
3853 shost_for_each_device(sdev, ioc->shost) {
3854 sas_device_priv_data = sdev->hostdata;
3855 if (!sas_device_priv_data)
/* Already blocked: nothing to do for this device. */
3857 if (sas_device_priv_data->block)
/* SES (enclosure) devices are exempted from blocking. */
3859 if (sas_device_priv_data->ignore_delay_remove) {
3860 sdev_printk(KERN_INFO, sdev,
3861 "%s skip device_block for SES handle(0x%04x)\n",
3862 __func__, sas_device_priv_data->sas_target->handle);
3865 _scsih_internal_device_block(sdev, sas_device_priv_data);
3870 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3871 * @ioc: per adapter object
3872 * @handle: device handle
3874 * During device pull we need to appropriately set the sdev state.
3877 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3879 struct MPT3SAS_DEVICE *sas_device_priv_data;
3880 struct scsi_device *sdev;
3881 struct _sas_device *sas_device;
/* Takes a reference on sas_device; released via sas_device_put() below. */
3883 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3885 shost_for_each_device(sdev, ioc->shost) {
3886 sas_device_priv_data = sdev->hostdata;
3887 if (!sas_device_priv_data)
3889 if (sas_device_priv_data->sas_target->handle != handle)
3891 if (sas_device_priv_data->block)
/* Don't block while an rphy add for this device is still pending. */
3893 if (sas_device && sas_device->pend_sas_rphy_add)
/* SES (enclosure) devices are exempted from blocking. */
3895 if (sas_device_priv_data->ignore_delay_remove) {
3896 sdev_printk(KERN_INFO, sdev,
3897 "%s skip device_block for SES handle(0x%04x)\n",
3898 __func__, sas_device_priv_data->sas_target->handle);
3901 _scsih_internal_device_block(sdev, sas_device_priv_data);
3905 sas_device_put(sas_device);
3909 * _scsih_block_io_to_children_attached_to_ex
3910 * @ioc: per adapter object
3911 * @sas_expander: the sas_device object
3913 * This routine set sdev state to SDEV_BLOCK for all devices
3914 * attached to this expander. This function called when expander is
3918 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3919 struct _sas_node *sas_expander)
3921 struct _sas_port *mpt3sas_port;
3922 struct _sas_device *sas_device;
3923 struct _sas_node *expander_sibling;
3924 unsigned long flags;
/* First pass: mark end devices on this expander in blocking_handles;
 * the actual blocking is performed later from the handle bitmap. */
3929 list_for_each_entry(mpt3sas_port,
3930 &sas_expander->sas_port_list, port_list) {
3931 if (mpt3sas_port->remote_identify.device_type ==
3933 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3934 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3935 mpt3sas_port->remote_identify.sas_address,
3936 mpt3sas_port->hba_port);
3938 set_bit(sas_device->handle,
3939 ioc->blocking_handles);
3940 sas_device_put(sas_device);
3942 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* Second pass: recurse into downstream (edge/fanout) expanders. */
3946 list_for_each_entry(mpt3sas_port,
3947 &sas_expander->sas_port_list, port_list) {
3949 if (mpt3sas_port->remote_identify.device_type ==
3950 SAS_EDGE_EXPANDER_DEVICE ||
3951 mpt3sas_port->remote_identify.device_type ==
3952 SAS_FANOUT_EXPANDER_DEVICE) {
3954 mpt3sas_scsih_expander_find_by_sas_address(
3955 ioc, mpt3sas_port->remote_identify.sas_address,
3956 mpt3sas_port->hba_port);
3957 _scsih_block_io_to_children_attached_to_ex(ioc,
3964 * _scsih_block_io_to_children_attached_directly
3965 * @ioc: per adapter object
3966 * @event_data: topology change event data
3968 * This routine set sdev state to SDEV_BLOCK for all devices
3969 * direct attached during device pull.
3972 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3973 Mpi2EventDataSasTopologyChangeList_t *event_data)
/* Block each PHY entry whose reason code says the device is delayed
 * not-responding (i.e. it may come back; keep I/O queued, not failed). */
3979 for (i = 0; i < event_data->NumEntries; i++) {
3980 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3983 reason_code = event_data->PHY[i].PhyStatus &
3984 MPI2_EVENT_SAS_TOPO_RC_MASK;
3985 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3986 _scsih_block_io_device(ioc, handle);
3991 * _scsih_block_io_to_pcie_children_attached_directly
3992 * @ioc: per adapter object
3993 * @event_data: topology change event data
3995 * This routine set sdev state to SDEV_BLOCK for all devices
3996 * direct attached during device pull/reconnect.
3999 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4000 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
/* PCIe counterpart of the SAS routine above: block each port entry in
 * the delay-not-responding state. */
4006 for (i = 0; i < event_data->NumEntries; i++) {
4008 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4011 reason_code = event_data->PortEntry[i].PortStatus;
4013 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4014 _scsih_block_io_device(ioc, handle);
4018 * _scsih_tm_tr_send - send task management request
4019 * @ioc: per adapter object
4020 * @handle: device handle
4021 * Context: interrupt time.
4023 * This code is to initiate the device removal handshake protocol
4024 * with controller firmware. This function will issue target reset
4025 * using high priority request queue. It will send a sas iounit
4026 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4028 * This is designed to send muliple task management request at the same
4029 * time to the fifo. If the fifo is full, we will append the request,
4030 * and process it in a future completion.
4033 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4035 Mpi2SCSITaskManagementRequest_t *mpi_request;
4037 struct _sas_device *sas_device = NULL;
4038 struct _pcie_device *pcie_device = NULL;
4039 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4040 u64 sas_address = 0;
4041 unsigned long flags;
4042 struct _tr_list *delayed_tr;
4045 struct hba_port *port = NULL;
/* Bail out when the PCI layer is recovering from an error. */
4047 if (ioc->pci_error_recovery) {
4049 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
/* Firmware must be operational to accept a task management request. */
4053 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4054 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4056 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4061 /* if PD, then return */
4062 if (test_bit(handle, ioc->pd_handles))
4065 clear_bit(handle, ioc->pend_os_device_add);
/* Look up the handle as a SAS device; mark the target deleted so no
 * further I/O is issued to it while the removal handshake runs. */
4067 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4068 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4069 if (sas_device && sas_device->starget &&
4070 sas_device->starget->hostdata) {
4071 sas_target_priv_data = sas_device->starget->hostdata;
4072 sas_target_priv_data->deleted = 1;
4073 sas_address = sas_device->sas_address;
4074 port = sas_device->port;
4076 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* Not found as SAS (or also)? Try the handle as an NVMe/PCIe device. */
4078 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4079 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4080 if (pcie_device && pcie_device->starget &&
4081 pcie_device->starget->hostdata) {
4082 sas_target_priv_data = pcie_device->starget->hostdata;
4083 sas_target_priv_data->deleted = 1;
4084 sas_address = pcie_device->wwid;
4086 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* Choose the TM method: protocol-level reset for true NVMe devices
 * (unless custom handling), link reset otherwise. */
4087 if (pcie_device && (!ioc->tm_custom_handling) &&
4088 (!(mpt3sas_scsih_is_pcie_scsi_device(
4089 pcie_device->device_info))))
4091 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4093 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4095 if (sas_target_priv_data) {
4097 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4098 handle, (u64)sas_address));
4100 if (sas_device->enclosure_handle != 0)
4102 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4103 (u64)sas_device->enclosure_logical_id,
4105 if (sas_device->connector_name[0] != '\0')
4107 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4108 sas_device->enclosure_level,
4109 sas_device->connector_name));
4110 } else if (pcie_device) {
4111 if (pcie_device->enclosure_handle != 0)
4113 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4114 (u64)pcie_device->enclosure_logical_id,
4115 pcie_device->slot));
4116 if (pcie_device->connector_name[0] != '\0')
4118 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4119 pcie_device->enclosure_level,
4120 pcie_device->connector_name));
/* Unblock the device (it is going away) and invalidate its handle. */
4122 _scsih_ublock_io_device(ioc, sas_address, port);
4123 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
/* Grab a high-priority smid; if the fifo is full, defer the target
 * reset to delayed_tr_list, replayed from _scsih_check_for_pending_tm(). */
4126 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4128 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4131 INIT_LIST_HEAD(&delayed_tr->list);
4132 delayed_tr->handle = handle;
4133 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4135 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4141 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4142 handle, smid, ioc->tm_tr_cb_idx));
/* Build and fire the target-reset TM request. */
4143 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4144 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4145 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4146 mpi_request->DevHandle = cpu_to_le16(handle);
4147 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4148 mpi_request->MsgFlags = tr_method;
4149 set_bit(handle, ioc->device_remove_in_progress);
4150 ioc->put_smid_hi_priority(ioc, smid, 0);
4151 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
/* Release the lookup references taken above. */
4155 sas_device_put(sas_device);
4157 pcie_device_put(pcie_device);
4161 * _scsih_tm_tr_complete -
4162 * @ioc: per adapter object
4163 * @smid: system request message index
4164 * @msix_index: MSIX table index supplied by the OS
4165 * @reply: reply message frame(lower 32bit addr)
4166 * Context: interrupt time.
4168 * This is the target reset completion routine.
4169 * This code is part of the code to initiate the device removal
4170 * handshake protocol with controller firmware.
4171 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4173 * Return: 1 meaning mf should be freed from _base_interrupt
4174 * 0 means the mf is freed from this function.
4177 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4181 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4182 Mpi2SCSITaskManagementReply_t *mpi_reply =
4183 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4184 Mpi2SasIoUnitControlRequest_t *mpi_request;
4187 struct _sc_list *delayed_sc;
/* Abort the handshake when PCI error recovery is in progress. */
4189 if (ioc->pci_error_recovery) {
4191 ioc_info(ioc, "%s: host in pci error recovery\n",
4195 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4196 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4198 ioc_info(ioc, "%s: host is not operational\n",
4202 if (unlikely(!mpi_reply)) {
4203 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4204 __FILE__, __LINE__, __func__);
/* Cross-check the reply's DevHandle against the original request. */
4207 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4208 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4209 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4211 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4213 le16_to_cpu(mpi_reply->DevHandle), smid));
4217 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4219 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4220 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4221 le32_to_cpu(mpi_reply->IOCLogInfo),
4222 le32_to_cpu(mpi_reply->TerminationCount)));
/* Step 2 of the handshake: get a normal smid for the SAS IO unit
 * control (REMOVE_DEVICE); defer to delayed_sc_list if none free. */
4224 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4225 if (!smid_sas_ctrl) {
4226 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4228 return _scsih_check_for_pending_tm(ioc, smid);
4229 INIT_LIST_HEAD(&delayed_sc->list);
4230 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4231 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4233 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4235 return _scsih_check_for_pending_tm(ioc, smid);
4239 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4240 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4241 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4242 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4243 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4244 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
/* DevHandle is already little-endian in the original request frame. */
4245 mpi_request->DevHandle = mpi_request_tm->DevHandle;
4246 ioc->put_smid_default(ioc, smid_sas_ctrl);
4248 return _scsih_check_for_pending_tm(ioc, smid);
4251 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4252 * issue to IOC or not.
4253 * @ioc: per adapter object
4254 * @scmd: pointer to scsi command object
4256 * Returns true if scmd can be issued to IOC otherwise returns false.
4258 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4259 struct scsi_cmnd *scmd)
/* Never issue commands while PCI error recovery is active. */
4262 if (ioc->pci_error_recovery)
/* MPI2 (SAS2) controllers: block everything during host removal. */
4265 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4266 if (ioc->remove_host)
/* Newer controllers: during removal, allow selected opcodes through
 * (e.g. SYNCHRONIZE_CACHE) so devices can flush before going away. */
4272 if (ioc->remove_host) {
4274 switch (scmd->cmnd[0]) {
4275 case SYNCHRONIZE_CACHE:
4287 * _scsih_sas_control_complete - completion routine
4288 * @ioc: per adapter object
4289 * @smid: system request message index
4290 * @msix_index: MSIX table index supplied by the OS
4291 * @reply: reply message frame(lower 32bit addr)
4292 * Context: interrupt time.
4294 * This is the sas iounit control completion routine.
4295 * This code is part of the code to initiate the device removal
4296 * handshake protocol with controller firmware.
4298 * Return: 1 meaning mf should be freed from _base_interrupt
4299 * 0 means the mf is freed from this function.
4302 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4303 u8 msix_index, u32 reply)
4305 Mpi2SasIoUnitControlReply_t *mpi_reply =
4306 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4308 if (likely(mpi_reply)) {
4310 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4311 le16_to_cpu(mpi_reply->DevHandle), smid,
4312 le16_to_cpu(mpi_reply->IOCStatus),
4313 le32_to_cpu(mpi_reply->IOCLogInfo)));
/* REMOVE_DEVICE succeeded: the handle is free for reuse, so clear
 * its device_remove_in_progress bit. */
4314 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4315 MPI2_IOCSTATUS_SUCCESS) {
4316 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4317 ioc->device_remove_in_progress);
4320 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4321 __FILE__, __LINE__, __func__);
/* Reuse or free this smid for any queued delayed internal command. */
4323 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4327 * _scsih_tm_tr_volume_send - send target reset request for volumes
4328 * @ioc: per adapter object
4329 * @handle: device handle
4330 * Context: interrupt time.
4332 * This is designed to send muliple task management request at the same
4333 * time to the fifo. If the fifo is full, we will append the request,
4334 * and process it in a future completion.
4337 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4339 Mpi2SCSITaskManagementRequest_t *mpi_request;
4341 struct _tr_list *delayed_tr;
4343 if (ioc->pci_error_recovery) {
4345 ioc_info(ioc, "%s: host reset in progress!\n",
/* Fifo full: queue on delayed_tr_volume_list, replayed from
 * _scsih_check_for_pending_tm() on a later completion. */
4350 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4352 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4355 INIT_LIST_HEAD(&delayed_tr->list);
4356 delayed_tr->handle = handle;
4357 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4359 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4365 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4366 handle, smid, ioc->tm_tr_volume_cb_idx));
/* Build and fire the target-reset TM request for the volume handle. */
4367 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4368 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4369 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4370 mpi_request->DevHandle = cpu_to_le16(handle);
4371 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4372 ioc->put_smid_hi_priority(ioc, smid, 0);
4376 * _scsih_tm_volume_tr_complete - target reset completion
4377 * @ioc: per adapter object
4378 * @smid: system request message index
4379 * @msix_index: MSIX table index supplied by the OS
4380 * @reply: reply message frame(lower 32bit addr)
4381 * Context: interrupt time.
4383 * Return: 1 meaning mf should be freed from _base_interrupt
4384 * 0 means the mf is freed from this function.
4387 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4388 u8 msix_index, u32 reply)
4391 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4392 Mpi2SCSITaskManagementReply_t *mpi_reply =
4393 mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* Skip processing during host or PCI error recovery. */
4395 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4397 ioc_info(ioc, "%s: host reset in progress!\n",
4401 if (unlikely(!mpi_reply)) {
4402 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4403 __FILE__, __LINE__, __func__);
/* Validate the reply DevHandle matches the original TM request. */
4407 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4408 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4409 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4411 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4412 handle, le16_to_cpu(mpi_reply->DevHandle),
4418 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4419 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4420 le32_to_cpu(mpi_reply->IOCLogInfo),
4421 le32_to_cpu(mpi_reply->TerminationCount)));
/* Reuse/free this smid for any pending delayed target resets. */
4423 return _scsih_check_for_pending_tm(ioc, smid);
4427 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4428 * @ioc: per adapter object
4429 * @smid: system request message index
4431 * @event_context: used to track events uniquely
4433 * Context - processed in interrupt context.
4436 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4439 Mpi2EventAckRequest_t *ack_request;
/* Index into internal_lookup[] for this internal-command smid. */
4440 int i = smid - ioc->internal_smid;
4441 unsigned long flags;
4443 /* Without releasing the smid just update the
4444 * call back index and reuse the same smid for
4445 * processing this delayed request
4447 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4448 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4449 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4452 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4453 le16_to_cpu(event), smid, ioc->base_cb_idx));
/* Build the Event ACK; event/event_context are kept in wire (LE) order
 * as saved from the original event notification. */
4454 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4455 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4456 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4457 ack_request->Event = event;
4458 ack_request->EventContext = event_context;
4459 ack_request->VF_ID = 0; /* TODO */
4460 ack_request->VP_ID = 0;
4461 ioc->put_smid_default(ioc, smid);
4465 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4466 * sas_io_unit_ctrl messages
4467 * @ioc: per adapter object
4468 * @smid: system request message index
4469 * @handle: device handle
4471 * Context - processed in interrupt context.
4474 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4475 u16 smid, u16 handle)
4477 Mpi2SasIoUnitControlRequest_t *mpi_request;
/* Index into internal_lookup[] for this internal-command smid. */
4479 int i = smid - ioc->internal_smid;
4480 unsigned long flags;
/* Skip when the host is gone, in PCI error recovery, or firmware is
 * not operational - the REMOVE_DEVICE would never complete. */
4482 if (ioc->remove_host) {
4484 ioc_info(ioc, "%s: host has been removed\n",
4487 } else if (ioc->pci_error_recovery) {
4489 ioc_info(ioc, "%s: host in pci error recovery\n",
4493 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4494 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4496 ioc_info(ioc, "%s: host is not operational\n",
4501 /* Without releasing the smid just update the
4502 * call back index and reuse the same smid for
4503 * processing this delayed request
4505 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4506 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4507 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4510 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4511 handle, smid, ioc->tm_sas_control_cb_idx));
/* Build and send the deferred SAS IO unit control REMOVE_DEVICE. */
4512 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4513 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4514 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4515 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4516 mpi_request->DevHandle = cpu_to_le16(handle);
4517 ioc->put_smid_default(ioc, smid);
4521 * _scsih_check_for_pending_internal_cmds - check for pending internal messages
4522 * @ioc: per adapter object
4523 * @smid: system request message index
4525 * Context: Executed in interrupt context
4527 * This will check delayed internal messages list, and process the
4530 * Return: 1 meaning mf should be freed from _base_interrupt
4531 * 0 means the mf is freed from this function.
4534 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4536 struct _sc_list *delayed_sc;
4537 struct _event_ack_list *delayed_event_ack;
/* Delayed Event ACKs take priority; the smid is reused in place. */
4539 if (!list_empty(&ioc->delayed_event_ack_list)) {
4540 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4541 struct _event_ack_list, list);
4542 _scsih_issue_delayed_event_ack(ioc, smid,
4543 delayed_event_ack->Event, delayed_event_ack->EventContext);
4544 list_del(&delayed_event_ack->list);
4545 kfree(delayed_event_ack);
/* Next, any deferred SAS IO unit control (REMOVE_DEVICE) requests. */
4549 if (!list_empty(&ioc->delayed_sc_list)) {
4550 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4551 struct _sc_list, list);
4552 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4553 delayed_sc->handle);
4554 list_del(&delayed_sc->list);
4562 * _scsih_check_for_pending_tm - check for pending task management
4563 * @ioc: per adapter object
4564 * @smid: system request message index
4566 * This will check delayed target reset list, and feed the
4569 * Return: 1 meaning mf should be freed from _base_interrupt
4570 * 0 means the mf is freed from this function.
4573 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4575 struct _tr_list *delayed_tr;
/* Volume target resets first; free this smid, then the send routine
 * allocates its own high-priority smid for the deferred request. */
4577 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4578 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4579 struct _tr_list, list);
4580 mpt3sas_base_free_smid(ioc, smid);
4581 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4582 list_del(&delayed_tr->list);
/* Then deferred device target resets, handled the same way. */
4587 if (!list_empty(&ioc->delayed_tr_list)) {
4588 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4589 struct _tr_list, list);
4590 mpt3sas_base_free_smid(ioc, smid);
4591 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4592 list_del(&delayed_tr->list);
4601 * _scsih_check_topo_delete_events - sanity check on topo events
4602 * @ioc: per adapter object
4603 * @event_data: the event data payload
4605 * This routine added to better handle cable breaker.
4607 * This handles the case where driver receives multiple expander
4608 * add and delete events in a single shot. When there is a delete event
4609 * the routine will void any pending add events waiting in the event queue.
4612 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4613 Mpi2EventDataSasTopologyChangeList_t *event_data)
4615 struct fw_event_work *fw_event;
4616 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4617 u16 expander_handle;
4618 struct _sas_node *sas_expander;
4619 unsigned long flags;
/* Kick off the removal handshake for every not-responding PHY entry. */
4623 for (i = 0 ; i < event_data->NumEntries; i++) {
4624 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4627 reason_code = event_data->PHY[i].PhyStatus &
4628 MPI2_EVENT_SAS_TOPO_RC_MASK;
4629 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4630 _scsih_tm_tr_send(ioc, handle);
4633 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
/* Handle below num_phys means direct-attached (no expander parent). */
4634 if (expander_handle < ioc->sas_hba.num_phys) {
4635 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4638 if (event_data->ExpStatus ==
4639 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4640 /* put expander attached devices into blocking state */
4641 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4642 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4644 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4645 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* Now block every handle the recursion collected in blocking_handles,
 * clearing each bit as it is consumed. */
4647 handle = find_first_bit(ioc->blocking_handles,
4648 ioc->facts.MaxDevHandle);
4649 if (handle < ioc->facts.MaxDevHandle)
4650 _scsih_block_io_device(ioc, handle);
4651 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4652 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4653 _scsih_block_io_to_children_attached_directly(ioc, event_data);
/* Only a not-responding expander voids pending add events below. */
4655 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4658 /* mark ignore flag for pending events */
4659 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4660 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4661 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4664 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4665 fw_event->event_data;
4666 if (local_event_data->ExpStatus ==
4667 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4668 local_event_data->ExpStatus ==
4669 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4670 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4673 ioc_info(ioc, "setting ignoring flag\n"));
4674 fw_event->ignore = 1;
4678 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4682 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4684 * @ioc: per adapter object
4685 * @event_data: the event data payload
4687 * This handles the case where driver receives multiple switch
4688 * or device add and delete events in a single shot. When there
4689 * is a delete event the routine will void any pending add
4690 * events waiting in the event queue.
4693 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4694 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4696 struct fw_event_work *fw_event;
4697 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4698 unsigned long flags;
4700 u16 handle, switch_handle;
/* Kick off the removal handshake for every not-responding port entry. */
4702 for (i = 0; i < event_data->NumEntries; i++) {
4704 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4707 reason_code = event_data->PortEntry[i].PortStatus;
4708 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4709 _scsih_tm_tr_send(ioc, handle);
/* switch_handle == 0 means direct-attached (no PCIe switch parent). */
4712 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4713 if (!switch_handle) {
4714 _scsih_block_io_to_pcie_children_attached_directly(
4718 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4719 if ((event_data->SwitchStatus
4720 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4721 (event_data->SwitchStatus ==
4722 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4723 _scsih_block_io_to_pcie_children_attached_directly(
/* NOTE(review): SwitchStatus is compared against a SAS topology
 * constant (MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) rather than an
 * MPI26_EVENT_PCIE_TOPO_SS_* value - presumably the numeric values
 * coincide; confirm against the MPI 2.6 headers. */
4726 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4729 /* mark ignore flag for pending events */
4730 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4731 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4732 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4736 (Mpi26EventDataPCIeTopologyChangeList_t *)
4737 fw_event->event_data;
4738 if (local_event_data->SwitchStatus ==
4739 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4740 local_event_data->SwitchStatus ==
4741 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4742 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4745 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4746 fw_event->ignore = 1;
4750 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4754 * _scsih_set_volume_delete_flag - setting volume delete flag
4755 * @ioc: per adapter object
4756 * @handle: device handle
4758 * This returns nothing.
4761 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4763 struct _raid_device *raid_device;
4764 struct MPT3SAS_TARGET *sas_target_priv_data;
4765 unsigned long flags;
/* Look the volume up under raid_device_lock so the raid device list
 * cannot change while we dereference the entry.
 */
4767 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4768 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4769 if (raid_device && raid_device->starget &&
4770 raid_device->starget->hostdata) {
4771 sas_target_priv_data =
4772 raid_device->starget->hostdata;
/* Mark the target deleted; the queuecommand path fails new I/O to
 * deleted targets with DID_NO_CONNECT.
 */
4773 sas_target_priv_data->deleted = 1;
4775 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4776 handle, (u64)raid_device->wwid));
4778 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4782 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4783 * @handle: input handle
4784 * @a: handle for volume a
4785 * @b: handle for volume b
4787 * IR firmware only supports two raid volumes. The purpose of this
4788 * routine is to set the volume handle in either a or b. When the given
4789 * input handle is non-zero, or when a and b have not been set before.
4792 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
/* Nothing to record when handle is zero or already stored in *a/*b. */
4794 if (!handle || handle == *a || handle == *b)
4803 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4804 * @ioc: per adapter object
4805 * @event_data: the event data payload
4806 * Context: interrupt time.
4808 * This routine will send target reset to volume, followed by target
4809 * resets to the PDs. This is called when a PD has been removed, or
4810 * volume has been deleted or removed. When the target reset is sent
4811 * to volume, the PD target resets need to be queued to start upon
4812 * completion of the volume target reset.
4815 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4816 Mpi2EventDataIrConfigChangeList_t *event_data)
4818 Mpi2EventIrConfigElement_t *element;
4820 u16 handle, volume_handle, a, b;
4821 struct _tr_list *delayed_tr;
/* Warpdrive controllers do not take this path. */
4826 if (ioc->is_warpdrive)
4829 /* Volume Resets for Deleted or Removed */
4830 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4831 for (i = 0; i < event_data->NumElements; i++, element++) {
/* Foreign configurations are skipped. */
4832 if (le32_to_cpu(event_data->Flags) &
4833 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4835 if (element->ReasonCode ==
4836 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4837 element->ReasonCode ==
4838 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4839 volume_handle = le16_to_cpu(element->VolDevHandle);
4840 _scsih_set_volume_delete_flag(ioc, volume_handle);
4841 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4845 /* Volume Resets for UNHIDE events */
4846 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4847 for (i = 0; i < event_data->NumElements; i++, element++) {
4848 if (le32_to_cpu(event_data->Flags) &
4849 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4851 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4852 volume_handle = le16_to_cpu(element->VolDevHandle);
4853 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
/* IR firmware supports at most two volumes (a and b); send a target
 * reset to each affected volume recorded above.
 */
4858 _scsih_tm_tr_volume_send(ioc, a);
4860 _scsih_tm_tr_volume_send(ioc, b);
4862 /* PD target resets */
4863 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4864 for (i = 0; i < event_data->NumElements; i++, element++) {
4865 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4867 handle = le16_to_cpu(element->PhysDiskDevHandle);
4868 volume_handle = le16_to_cpu(element->VolDevHandle);
/* The device is no longer a hidden physical disk. */
4869 clear_bit(handle, ioc->pd_handles);
4871 _scsih_tm_tr_send(ioc, handle);
/* A volume reset is outstanding for this PD's parent: queue the PD
 * reset to run after it completes.  GFP_ATOMIC because we are in
 * interrupt context.
 */
4872 else if (volume_handle == a || volume_handle == b) {
4873 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4874 BUG_ON(!delayed_tr);
4875 INIT_LIST_HEAD(&delayed_tr->list);
4876 delayed_tr->handle = handle;
4877 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4879 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4882 _scsih_tm_tr_send(ioc, handle);
4888 * _scsih_check_volume_delete_events - set delete flag for volumes
4889 * @ioc: per adapter object
4890 * @event_data: the event data payload
4891 * Context: interrupt time.
4893 * This will handle the case when the cable connected to entire volume is
4894 * pulled. We will take care of setting the deleted flag so normal IO will not be sent.
4898 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4899 Mpi2EventDataIrVolume_t *event_data)
/* Only volume state-change events are of interest here. */
4903 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4905 state = le32_to_cpu(event_data->NewValue);
/* A missing or failed volume is treated as deleted. */
4906 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4907 MPI2_RAID_VOL_STATE_FAILED)
4908 _scsih_set_volume_delete_flag(ioc,
4909 le16_to_cpu(event_data->VolDevHandle))
4913 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4914 * @ioc: per adapter object
4915 * @event_data: the temp threshold event data
4916 * Context: interrupt time.
4919 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4920 Mpi2EventDataTemperature_t *event_data)
/* Only report sensors the IOC claims to have. */
4923 if (ioc->temp_sensors_count >= event_data->SensorNum) {
/* Status bits 0-3 select which of the four thresholds tripped. */
4924 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4925 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4926 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4927 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4928 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4929 event_data->SensorNum);
4930 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4931 event_data->CurrentTemperature);
/* On non-MPI2 (2.5+) controllers, also dump the fault or coredump
 * code if the IOC is already in that state.
 */
4932 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4933 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4934 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4935 MPI2_IOC_STATE_FAULT) {
4936 mpt3sas_print_fault_code(ioc,
4937 doorbell & MPI2_DOORBELL_DATA_MASK);
4938 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4939 MPI2_IOC_STATE_COREDUMP) {
4940 mpt3sas_print_coredump_info(ioc,
4941 doorbell & MPI2_DOORBELL_DATA_MASK);
/* Track an in-flight ATA passthrough (SATL) command per device.
 * When pending is requested, returns nonzero if another ATA command
 * is already outstanding (test_and_set_bit saw the bit set).
 */
4947 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4949 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
/* Only ATA_12/ATA_16 passthrough CDBs participate in this tracking. */
4951 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4955 return test_and_set_bit(0, &priv->ata_command_pending);
4957 clear_bit(0, &priv->ata_command_pending);
4962 * _scsih_flush_running_cmds - completing outstanding commands.
4963 * @ioc: per adapter object
4965 * The flushing out of all pending scmd commands following host reset,
4966 * where all IO is dropped to the floor.
4969 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4971 struct scsi_cmnd *scmd;
4972 struct scsiio_tracker *st;
/* Walk every possible SCSI I/O smid and complete whatever is active. */
4976 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4977 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
/* Drop any SATL-pending marker before completing the command. */
4981 _scsih_set_satl_pending(scmd, false);
4982 st = scsi_cmd_priv(scmd);
4983 mpt3sas_base_clear_st(ioc, st);
4984 scsi_dma_unmap(scmd);
/* During PCI error recovery or host removal the device is gone for
 * good; otherwise report a reset so the midlayer can retry.
 */
4985 if (ioc->pci_error_recovery || ioc->remove_host)
4986 scmd->result = DID_NO_CONNECT << 16;
4988 scmd->result = DID_RESET << 16;
4989 scmd->scsi_done(scmd);
4991 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4995 * _scsih_setup_eedp - setup MPI request for EEDP transfer
4996 * @ioc: per adapter object
4997 * @scmd: pointer to scsi command object
4998 * @mpi_request: pointer to the SCSI_IO request message frame
5000 * Supporting protection 1 and 3.
5003 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5004 Mpi25SCSIIORequest_t *mpi_request)
5007 unsigned char prot_op = scsi_get_prot_op(scmd);
5008 unsigned char prot_type = scsi_get_prot_type(scmd);
5009 Mpi25SCSIIORequest_t *mpi_request_3v =
5010 (Mpi25SCSIIORequest_t *)mpi_request;
/* No protection requested: leave the request untouched. */
5012 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
5015 if (prot_op == SCSI_PROT_READ_STRIP)
5016 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5017 else if (prot_op == SCSI_PROT_WRITE_INSERT)
5018 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5022 switch (prot_type) {
5023 case SCSI_PROT_DIF_TYPE1:
5024 case SCSI_PROT_DIF_TYPE2:
5027 * enable ref/guard checking
5028 * auto increment ref tag
5030 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
5031 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
5032 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
/* The reference tag is big-endian in the EEDP32 CDB layout. */
5033 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5034 cpu_to_be32(t10_pi_ref_tag(scmd->request));
5037 case SCSI_PROT_DIF_TYPE3:
5040 * enable guard checking
5042 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5047 mpi_request_3v->EEDPBlockSize =
5048 cpu_to_le16(scmd->device->sector_size);
/* Gen3.5 IOCs additionally disable application-tag checking. */
5050 if (ioc->is_gen35_ioc)
5051 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5052 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5056 * _scsih_eedp_error_handling - return sense code for EEDP errors
5057 * @scmd: pointer to scsi command object
5058 * @ioc_status: ioc status
5061 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5065 switch (ioc_status) {
5066 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5069 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5072 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
/* Report the DIF failure as ILLEGAL REQUEST with asc 0x10 sense data
 * and complete the command as aborted with a check condition.
 */
5079 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
5081 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
5082 SAM_STAT_CHECK_CONDITION;
5086 * scsih_qcmd - main scsi request entry point
5087 * @shost: SCSI host pointer
5088 * @scmd: pointer to scsi command object
5090 * The callback index is set inside `ioc->scsi_io_cb_idx`.
5092 * Return: 0 on success. If there's a failure, return either:
5093 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5094 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5097 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5099 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5100 struct MPT3SAS_DEVICE *sas_device_priv_data;
5101 struct MPT3SAS_TARGET *sas_target_priv_data;
5102 struct _raid_device *raid_device;
5103 struct request *rq = scmd->request;
5105 Mpi25SCSIIORequest_t *mpi_request;
5106 struct _pcie_device *pcie_device = NULL;
5111 if (ioc->logging_level & MPT_DEBUG_SCSI)
5112 scsi_print_command(scmd);
/* Fail fast when the device has no driver-private data attached. */
5114 sas_device_priv_data = scmd->device->hostdata;
5115 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5116 scmd->result = DID_NO_CONNECT << 16;
5117 scmd->scsi_done(scmd);
5121 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5122 scmd->result = DID_NO_CONNECT << 16;
5123 scmd->scsi_done(scmd);
5127 sas_target_priv_data = sas_device_priv_data->sas_target;
5129 /* invalid device handle */
5130 handle = sas_target_priv_data->handle;
5131 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5132 scmd->result = DID_NO_CONNECT << 16;
5133 scmd->scsi_done(scmd);
5138 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5139 /* host recovery or link resets sent via IOCTLs */
5140 return SCSI_MLQUEUE_HOST_BUSY;
5141 } else if (sas_target_priv_data->deleted) {
5142 /* device has been deleted */
5143 scmd->result = DID_NO_CONNECT << 16;
5144 scmd->scsi_done(scmd);
5146 } else if (sas_target_priv_data->tm_busy ||
5147 sas_device_priv_data->block) {
5148 /* device busy with task management */
5149 return SCSI_MLQUEUE_DEVICE_BUSY;
5153 * Bug work around for firmware SATL handling. The loop
5154 * is based on atomic operations and ensures consistency
5155 * since we're lockless at this point
5158 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5159 return SCSI_MLQUEUE_DEVICE_BUSY;
5160 } while (_scsih_set_satl_pending(scmd, true));
/* Translate the data direction into MPI control flags. */
5162 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5163 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5164 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5165 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5167 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5170 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5171 /* NCQ Prio supported, make sure control indicated high priority */
5172 if (sas_device_priv_data->ncq_prio_enable) {
5173 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5174 if (class == IOPRIO_CLASS_RT)
5175 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5177 /* Make sure Device is not raid volume.
5178 * We do not expose raid functionality to upper layer for warpdrive.
5180 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5181 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5182 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5183 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
/* Obtain a request slot (smid); clear the SATL marker on failure so
 * a later retry of this ATA command is not blocked.
 */
5185 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5187 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5188 _scsih_set_satl_pending(scmd, false);
5191 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5192 memset(mpi_request, 0, ioc->request_sz);
5193 _scsih_setup_eedp(ioc, scmd, mpi_request);
5195 if (scmd->cmd_len == 32)
5196 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5197 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
/* RAID components are addressed via the RAID passthrough function. */
5198 if (sas_device_priv_data->sas_target->flags &
5199 MPT_TARGET_FLAGS_RAID_COMPONENT)
5200 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5202 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5203 mpi_request->DevHandle = cpu_to_le16(handle);
5204 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5205 mpi_request->Control = cpu_to_le32(mpi_control);
5206 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5207 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5208 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5209 mpi_request->SenseBufferLowAddress =
5210 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5211 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5212 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5214 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
/* Build the scatter/gather list; free the smid and drop the SATL
 * marker if SG construction fails.
 */
5216 if (mpi_request->DataLength) {
5217 pcie_device = sas_target_priv_data->pcie_dev;
5218 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5219 mpt3sas_base_free_smid(ioc, smid);
5220 _scsih_set_satl_pending(scmd, false);
5224 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5226 raid_device = sas_target_priv_data->raid_device;
5227 if (raid_device && raid_device->direct_io_enabled)
5228 mpt3sas_setup_direct_io(ioc, scmd,
5229 raid_device, mpi_request);
/* Dispatch: fast-path doorbell for fast-path-capable targets,
 * otherwise the normal SCSI I/O or default doorbell.
 */
5231 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5232 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5233 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5234 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5235 ioc->put_smid_fast_path(ioc, smid, handle);
5237 ioc->put_smid_scsi_io(ioc, smid,
5238 le16_to_cpu(mpi_request->DevHandle));
5240 ioc->put_smid_default(ioc, smid);
5244 return SCSI_MLQUEUE_HOST_BUSY;
5248 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5249 * @sense_buffer: sense data returned by target
5250 * @data: normalized skey/asc/ascq
5253 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
/* Response codes 0x72/0x73 use the descriptor sense format, which
 * keeps key/asc/ascq in bytes 1-3; fixed format keeps them at
 * bytes 2/12/13.
 */
5255 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5256 /* descriptor format */
5257 data->skey = sense_buffer[1] & 0x0F;
5258 data->asc = sense_buffer[2];
5259 data->ascq = sense_buffer[3];
5262 data->skey = sense_buffer[2] & 0x0F;
5263 data->asc = sense_buffer[12];
5264 data->ascq = sense_buffer[13];
5269 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
5270 * @ioc: per adapter object
5271 * @scmd: pointer to scsi command object
5272 * @mpi_reply: reply mf payload returned from firmware
5275 * scsi_status - SCSI Status code returned from target device
5276 * scsi_state - state info associated with SCSI_IO determined by ioc
5277 * ioc_status - ioc supplied status info
5280 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5281 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5285 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5286 MPI2_IOCSTATUS_MASK;
5287 u8 scsi_state = mpi_reply->SCSIState;
5288 u8 scsi_status = mpi_reply->SCSIStatus;
5289 char *desc_ioc_state = NULL;
5290 char *desc_scsi_status = NULL;
/* scsi_state text is assembled in the adapter's scratch string. */
5291 char *desc_scsi_state = ioc->tmp_string;
5292 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5293 struct _sas_device *sas_device = NULL;
5294 struct _pcie_device *pcie_device = NULL;
5295 struct scsi_target *starget = scmd->device->sdev_target;
5296 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5297 char *device_str = NULL;
/* Pick the label used for volume targets; WarpDrive hides IR naming. */
5301 if (ioc->hide_ir_msg)
5302 device_str = "WarpDrive";
5304 device_str = "volume";
5306 if (log_info == 0x31170000)
/* Decode the IOC status into a printable description. */
5309 switch (ioc_status) {
5310 case MPI2_IOCSTATUS_SUCCESS:
5311 desc_ioc_state = "success";
5313 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5314 desc_ioc_state = "invalid function";
5316 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5317 desc_ioc_state = "scsi recovered error";
5319 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5320 desc_ioc_state = "scsi invalid dev handle";
5322 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5323 desc_ioc_state = "scsi device not there";
5325 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5326 desc_ioc_state = "scsi data overrun";
5328 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5329 desc_ioc_state = "scsi data underrun";
5331 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5332 desc_ioc_state = "scsi io data error";
5334 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5335 desc_ioc_state = "scsi protocol error";
5337 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5338 desc_ioc_state = "scsi task terminated";
5340 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5341 desc_ioc_state = "scsi residual mismatch";
5343 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5344 desc_ioc_state = "scsi task mgmt failed";
5346 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5347 desc_ioc_state = "scsi ioc terminated";
5349 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5350 desc_ioc_state = "scsi ext terminated";
5352 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5353 desc_ioc_state = "eedp guard error";
5355 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5356 desc_ioc_state = "eedp ref tag error";
5358 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5359 desc_ioc_state = "eedp app tag error";
5361 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5362 desc_ioc_state = "insufficient power";
5365 desc_ioc_state = "unknown";
/* Decode the SCSI status byte. */
5369 switch (scsi_status) {
5370 case MPI2_SCSI_STATUS_GOOD:
5371 desc_scsi_status = "good";
5373 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5374 desc_scsi_status = "check condition";
5376 case MPI2_SCSI_STATUS_CONDITION_MET:
5377 desc_scsi_status = "condition met";
5379 case MPI2_SCSI_STATUS_BUSY:
5380 desc_scsi_status = "busy";
5382 case MPI2_SCSI_STATUS_INTERMEDIATE:
5383 desc_scsi_status = "intermediate";
5385 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5386 desc_scsi_status = "intermediate condmet";
5388 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5389 desc_scsi_status = "reservation conflict";
5391 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5392 desc_scsi_status = "command terminated";
5394 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5395 desc_scsi_status = "task set full";
5397 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5398 desc_scsi_status = "aca active";
5400 case MPI2_SCSI_STATUS_TASK_ABORTED:
5401 desc_scsi_status = "task aborted";
5404 desc_scsi_status = "unknown";
/* Build the scsi_state description from the individual state bits. */
5408 desc_scsi_state[0] = '\0';
5410 desc_scsi_state = " ";
5411 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5412 strcat(desc_scsi_state, "response info ");
5413 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5414 strcat(desc_scsi_state, "state terminated ");
5415 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5416 strcat(desc_scsi_state, "no status ");
5417 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5418 strcat(desc_scsi_state, "autosense failed ");
5419 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5420 strcat(desc_scsi_state, "autosense valid ");
5422 scsi_print_command(scmd);
/* Identify the device the failed command targeted: volume, NVMe/PCIe
 * device, or SAS device.
 */
5424 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5425 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5426 device_str, (u64)priv_target->sas_address);
5427 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5428 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5430 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5431 (u64)pcie_device->wwid, pcie_device->port_num);
5432 if (pcie_device->enclosure_handle != 0)
5433 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5434 (u64)pcie_device->enclosure_logical_id,
5436 if (pcie_device->connector_name[0])
5437 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5438 pcie_device->enclosure_level,
5439 pcie_device->connector_name);
5440 pcie_device_put(pcie_device);
5443 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5445 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5446 (u64)sas_device->sas_address, sas_device->phy);
5448 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5451 sas_device_put(sas_device);
/* Dump the decoded reply fields. */
5455 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5456 le16_to_cpu(mpi_reply->DevHandle),
5457 desc_ioc_state, ioc_status, smid);
5458 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5459 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5460 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5461 le16_to_cpu(mpi_reply->TaskTag),
5462 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5463 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5464 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
/* Dump normalized sense data when autosense is valid. */
5466 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5467 struct sense_info data;
5468 _scsih_normalize_sense(scmd->sense_buffer, &data);
5469 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5470 data.skey, data.asc, data.ascq,
5471 le32_to_cpu(mpi_reply->SenseCount));
5473 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5474 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5475 response_bytes = (u8 *)&response_info;
5476 _scsih_response_code(ioc, response_bytes[0]);
5481 * _scsih_turn_on_pfa_led - illuminate PFA LED
5482 * @ioc: per adapter object
5483 * @handle: device handle
5487 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5489 Mpi2SepReply_t mpi_reply;
5490 Mpi2SepRequest_t mpi_request;
5491 struct _sas_device *sas_device;
/* Takes a reference on the sas_device; released at the end. */
5493 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
/* Ask the enclosure processor to set the predicted-fault slot status,
 * addressing the slot via the device handle.
 */
5497 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5498 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5499 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5500 mpi_request.SlotStatus =
5501 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5502 mpi_request.DevHandle = cpu_to_le16(handle);
5503 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5504 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5505 &mpi_request)) != 0) {
5506 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5507 __FILE__, __LINE__, __func__);
/* Remember the LED state so it can be turned off on device removal. */
5510 sas_device->pfa_led_on = 1;
5512 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5514 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5515 le16_to_cpu(mpi_reply.IOCStatus),
5516 le32_to_cpu(mpi_reply.IOCLogInfo)));
5520 sas_device_put(sas_device);
5524 * _scsih_turn_off_pfa_led - turn off Fault LED
5525 * @ioc: per adapter object
5526 * @sas_device: sas device whose PFA LED has to turned off
5530 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5531 struct _sas_device *sas_device)
5533 Mpi2SepReply_t mpi_reply;
5534 Mpi2SepRequest_t mpi_request;
/* Clear the slot status.  Unlike the turn-on path, the slot is
 * addressed by enclosure handle + slot number (DevHandle is zeroed),
 * since the device handle may no longer be valid at removal time.
 */
5536 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5537 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5538 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5539 mpi_request.SlotStatus = 0;
5540 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5541 mpi_request.DevHandle = 0;
5542 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5543 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5544 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5545 &mpi_request)) != 0) {
5546 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5547 __FILE__, __LINE__, __func__);
5551 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5553 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5554 le16_to_cpu(mpi_reply.IOCStatus),
5555 le32_to_cpu(mpi_reply.IOCLogInfo)));
5561 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5562 * @ioc: per adapter object
5563 * @handle: device handle
5564 * Context: interrupt.
5567 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5569 struct fw_event_work *fw_event;
/* Queue a driver-internal event so the LED is lit from the firmware
 * event worker rather than from interrupt context.
 */
5571 fw_event = alloc_fw_event_work(0);
5574 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5575 fw_event->device_handle = handle;
5576 fw_event->ioc = ioc;
5577 _scsih_fw_event_add(ioc, fw_event);
/* Drop the allocation reference; the event list holds its own. */
5578 fw_event_work_put(fw_event);
5582 * _scsih_smart_predicted_fault - process smart errors
5583 * @ioc: per adapter object
5584 * @handle: device handle
5585 * Context: interrupt.
5588 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5590 struct scsi_target *starget;
5591 struct MPT3SAS_TARGET *sas_target_priv_data;
5592 Mpi2EventNotificationReply_t *event_reply;
5593 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5594 struct _sas_device *sas_device;
5596 unsigned long flags;
5598 /* only handle non-raid devices */
5599 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5600 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5604 starget = sas_device->starget;
5605 sas_target_priv_data = starget->hostdata;
/* RAID components and volumes are skipped. */
5607 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5608 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5611 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5613 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* IBM-branded adapters additionally light the predicted-fault LED. */
5615 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5616 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5618 /* insert into event log */
5619 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5620 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
/* GFP_ATOMIC: called from interrupt context. */
5621 event_reply = kzalloc(sz, GFP_ATOMIC);
5623 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5624 __FILE__, __LINE__, __func__);
/* Fabricate a SAS device status change event (SMART data, asc 0x5D)
 * and feed it into the ctl event log for user space.
 */
5628 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5629 event_reply->Event =
5630 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5631 event_reply->MsgLength = sz/4;
5632 event_reply->EventDataLength =
5633 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5634 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5635 event_reply->EventData;
5636 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5637 event_data->ASC = 0x5D;
5638 event_data->DevHandle = cpu_to_le16(handle);
5639 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5640 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5644 sas_device_put(sas_device);
5648 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5653 * _scsih_io_done - scsi request callback
5654 * @ioc: per adapter object
5655 * @smid: system request message index
5656 * @msix_index: MSIX table index supplied by the OS
5657 * @reply: reply message frame(lower 32bit addr)
5659 * Callback handler when using _scsih_qcmd.
5661 * Return: 1 meaning mf should be freed from _base_interrupt
5662 * 0 means the mf is freed from this function.
5665 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5667 Mpi25SCSIIORequest_t *mpi_request;
5668 Mpi2SCSIIOReply_t *mpi_reply;
5669 struct scsi_cmnd *scmd;
5670 struct scsiio_tracker *st;
5676 struct MPT3SAS_DEVICE *sas_device_priv_data;
5677 u32 response_code = 0;
5679 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5681 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5685 _scsih_set_satl_pending(scmd, false);
5687 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5689 if (mpi_reply == NULL) {
5690 scmd->result = DID_OK << 16;
5694 sas_device_priv_data = scmd->device->hostdata;
5695 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5696 sas_device_priv_data->sas_target->deleted) {
5697 scmd->result = DID_NO_CONNECT << 16;
5700 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5703 * WARPDRIVE: If direct_io is set then it is directIO,
5704 * the failed direct I/O should be redirected to volume
5706 st = scsi_cmd_priv(scmd);
5707 if (st->direct_io &&
5708 ((ioc_status & MPI2_IOCSTATUS_MASK)
5709 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5712 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5713 mpi_request->DevHandle =
5714 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5715 ioc->put_smid_scsi_io(ioc, smid,
5716 sas_device_priv_data->sas_target->handle);
5719 /* turning off TLR */
5720 scsi_state = mpi_reply->SCSIState;
5721 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5723 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5724 if (!sas_device_priv_data->tlr_snoop_check) {
5725 sas_device_priv_data->tlr_snoop_check++;
5726 if ((!ioc->is_warpdrive &&
5727 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5728 !scsih_is_nvme(&scmd->device->sdev_gendev))
5729 && sas_is_tlr_enabled(scmd->device) &&
5730 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5731 sas_disable_tlr(scmd->device);
5732 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5736 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5737 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5738 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5739 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5742 ioc_status &= MPI2_IOCSTATUS_MASK;
5743 scsi_status = mpi_reply->SCSIStatus;
5745 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5746 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5747 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5748 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5749 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5752 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5753 struct sense_info data;
5754 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5756 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5757 le32_to_cpu(mpi_reply->SenseCount));
5758 memcpy(scmd->sense_buffer, sense_data, sz);
5759 _scsih_normalize_sense(scmd->sense_buffer, &data);
5760 /* failure prediction threshold exceeded */
5761 if (data.asc == 0x5D)
5762 _scsih_smart_predicted_fault(ioc,
5763 le16_to_cpu(mpi_reply->DevHandle));
5764 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5766 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5767 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5768 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5769 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5770 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5772 switch (ioc_status) {
5773 case MPI2_IOCSTATUS_BUSY:
5774 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5775 scmd->result = SAM_STAT_BUSY;
5778 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5779 scmd->result = DID_NO_CONNECT << 16;
5782 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5783 if (sas_device_priv_data->block) {
5784 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5787 if (log_info == 0x31110630) {
5788 if (scmd->retries > 2) {
5789 scmd->result = DID_NO_CONNECT << 16;
5790 scsi_device_set_state(scmd->device,
5793 scmd->result = DID_SOFT_ERROR << 16;
5794 scmd->device->expecting_cc_ua = 1;
5797 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5798 scmd->result = DID_RESET << 16;
5800 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5801 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5802 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5803 scmd->result = DID_RESET << 16;
5806 scmd->result = DID_SOFT_ERROR << 16;
5808 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5809 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5810 scmd->result = DID_RESET << 16;
5813 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5814 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5815 scmd->result = DID_SOFT_ERROR << 16;
5817 scmd->result = (DID_OK << 16) | scsi_status;
5820 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5821 scmd->result = (DID_OK << 16) | scsi_status;
5823 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5826 if (xfer_cnt < scmd->underflow) {
5827 if (scsi_status == SAM_STAT_BUSY)
5828 scmd->result = SAM_STAT_BUSY;
5830 scmd->result = DID_SOFT_ERROR << 16;
5831 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5832 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5833 scmd->result = DID_SOFT_ERROR << 16;
5834 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5835 scmd->result = DID_RESET << 16;
5836 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5837 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5838 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5839 scmd->result = (DRIVER_SENSE << 24) |
5840 SAM_STAT_CHECK_CONDITION;
5841 scmd->sense_buffer[0] = 0x70;
5842 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5843 scmd->sense_buffer[12] = 0x20;
5844 scmd->sense_buffer[13] = 0;
5848 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5849 scsi_set_resid(scmd, 0);
5851 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5852 case MPI2_IOCSTATUS_SUCCESS:
5853 scmd->result = (DID_OK << 16) | scsi_status;
5854 if (response_code ==
5855 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5856 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5857 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5858 scmd->result = DID_SOFT_ERROR << 16;
5859 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5860 scmd->result = DID_RESET << 16;
5863 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5864 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5865 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5866 _scsih_eedp_error_handling(scmd, ioc_status);
5869 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5870 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5871 case MPI2_IOCSTATUS_INVALID_SGL:
5872 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5873 case MPI2_IOCSTATUS_INVALID_FIELD:
5874 case MPI2_IOCSTATUS_INVALID_STATE:
5875 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5876 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5877 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5879 scmd->result = DID_SOFT_ERROR << 16;
5884 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5885 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5889 scsi_dma_unmap(scmd);
5890 mpt3sas_base_free_smid(ioc, smid);
5891 scmd->scsi_done(scmd);
5896 * _scsih_update_vphys_after_reset - update the Port's
5897 * vphys_list after reset
5898 * @ioc: per adapter object
5903 _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5907 Mpi2ConfigReply_t mpi_reply;
5908 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5909 u16 attached_handle;
5910 u64 attached_sas_addr;
5911 u8 found = 0, port_id;
5912 Mpi2SasPhyPage0_t phy_pg0;
5913 struct hba_port *port, *port_next, *mport;
5914 struct virtual_phy *vphy, *vphy_next;
5915 struct _sas_device *sas_device;
5918 * Mark all the vphys objects as dirty.
5920 list_for_each_entry_safe(port, port_next,
5921 &ioc->port_table_list, list) {
5922 if (!port->vphys_mask)
5924 list_for_each_entry_safe(vphy, vphy_next,
5925 &port->vphys_list, list) {
5926 vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5931 * Read SASIOUnitPage0 to get each HBA Phy's data.
5933 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
5934 (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
5935 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5936 if (!sas_iounit_pg0) {
5937 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5938 __FILE__, __LINE__, __func__);
5941 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5942 sas_iounit_pg0, sz)) != 0)
5944 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5945 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5948 * Loop over each HBA Phy.
5950 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5952 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
5954 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5955 MPI2_SAS_NEG_LINK_RATE_1_5)
5958 * Check whether Phy is connected to SEP device or not,
5959 * if it is SEP device then read the Phy's SASPHYPage0 data to
5960 * determine whether Phy is a virtual Phy or not. if it is
5961 * virtual phy then it is conformed that the attached remote
5962 * device is a HBA's vSES device.
5965 sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
5966 MPI2_SAS_DEVICE_INFO_SEP))
5969 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5971 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5972 __FILE__, __LINE__, __func__);
5976 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
5977 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
5980 * Get the vSES device's SAS Address.
5982 attached_handle = le16_to_cpu(
5983 sas_iounit_pg0->PhyData[i].AttachedDevHandle);
5984 if (_scsih_get_sas_address(ioc, attached_handle,
5985 &attached_sas_addr) != 0) {
5986 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5987 __FILE__, __LINE__, __func__);
5992 port = port_next = NULL;
5994 * Loop over each virtual_phy object from
5995 * each port's vphys_list.
5997 list_for_each_entry_safe(port,
5998 port_next, &ioc->port_table_list, list) {
5999 if (!port->vphys_mask)
6001 list_for_each_entry_safe(vphy, vphy_next,
6002 &port->vphys_list, list) {
6004 * Continue with next virtual_phy object
6005 * if the object is not marked as dirty.
6007 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6011 * Continue with next virtual_phy object
6012 * if the object's SAS Address is not equals
6013 * to current Phy's vSES device SAS Address.
6015 if (vphy->sas_address != attached_sas_addr)
6018 * Enable current Phy number bit in object's
6021 if (!(vphy->phy_mask & (1 << i)))
6022 vphy->phy_mask = (1 << i);
6024 * Get hba_port object from hba_port table
6025 * corresponding to current phy's Port ID.
6026 * if there is no hba_port object corresponding
6027 * to Phy's Port ID then create a new hba_port
6028 * object & add to hba_port table.
6030 port_id = sas_iounit_pg0->PhyData[i].Port;
6031 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6034 sizeof(struct hba_port), GFP_KERNEL);
6037 mport->port_id = port_id;
6039 "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6040 __func__, mport, mport->port_id);
6041 list_add_tail(&mport->list,
6042 &ioc->port_table_list);
6045 * If mport & port pointers are not pointing to
6046 * same hba_port object then it means that vSES
6047 * device's Port ID got changed after reset and
6048 * hence move current virtual_phy object from
6049 * port's vphys_list to mport's vphys_list.
6051 if (port != mport) {
6052 if (!mport->vphys_mask)
6054 &mport->vphys_list);
6055 mport->vphys_mask |= (1 << i);
6056 port->vphys_mask &= ~(1 << i);
6057 list_move(&vphy->list,
6058 &mport->vphys_list);
6059 sas_device = mpt3sas_get_sdev_by_addr(
6060 ioc, attached_sas_addr, port);
6062 sas_device->port = mport;
6065 * Earlier while updating the hba_port table,
6066 * it is determined that there is no other
6067 * direct attached device with mport's Port ID,
6068 * Hence mport was marked as dirty. Only vSES
6069 * device has this Port ID, so unmark the mport
6072 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6073 mport->sas_address = 0;
6074 mport->phy_mask = 0;
6076 ~HBA_PORT_FLAG_DIRTY_PORT;
6079 * Unmark current virtual_phy object as dirty.
6081 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6090 kfree(sas_iounit_pg0);
6094 * _scsih_get_port_table_after_reset - Construct temporary port table
6095 * @ioc: per adapter object
6096 * @port_table: address where port table needs to be constructed
6098 * return number of HBA port entries available after reset.
 *
 * Walks SASIOUnitPage0 and coalesces phys that share the same
 * (port_id, attached SAS address) pair into one temporary hba_port
 * entry, OR-ing each phy's bit into that entry's phy_mask.
6101 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6102 struct hba_port *port_table)
6106 Mpi2ConfigReply_t mpi_reply;
6107 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6108 u16 attached_handle;
6109 u64 attached_sas_addr;
6110 u8 found = 0, port_count = 0, port_id;
6112 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6113 * sizeof(Mpi2SasIOUnit0PhyData_t));
6114 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6115 if (!sas_iounit_pg0) {
6116 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6117 __FILE__, __LINE__, __func__);
6121 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6122 sas_iounit_pg0, sz)) != 0)
6124 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6125 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6127 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
	/* skip phys with no link (negotiated rate below 1.5G) */
6129 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6130 MPI2_SAS_NEG_LINK_RATE_1_5)
6133 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6134 if (_scsih_get_sas_address(
6135 ioc, attached_handle, &attached_sas_addr) != 0) {
6136 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6137 __FILE__, __LINE__, __func__);
	/* fold this phy into an existing entry with matching id+address */
6141 for (j = 0; j < port_count; j++) {
6142 port_id = sas_iounit_pg0->PhyData[i].Port;
6143 if (port_table[j].port_id == port_id &&
6144 port_table[j].sas_address == attached_sas_addr) {
6145 port_table[j].phy_mask |= (1 << i);
	/* no match above: start a new temporary port entry */
6154 port_id = sas_iounit_pg0->PhyData[i].Port;
6155 port_table[port_count].port_id = port_id;
6156 port_table[port_count].phy_mask = (1 << i);
6157 port_table[port_count].sas_address = attached_sas_addr;
6161 kfree(sas_iounit_pg0);
/*
 * Result codes from _scsih_look_and_get_matched_port_entry(), ordered
 * strongest match first (the remaining enumerators are elided from this
 * excerpt).
 */
6165 enum hba_port_matched_codes {
6167 MATCHED_WITH_ADDR_AND_PHYMASK,
6168 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6169 MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6174 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6175 * from HBA port table
6176 * @ioc: per adapter object
6177 * @port_entry - hba port entry from temporary port table which needs to be
6178 * searched for matched entry in the HBA port table
6179 * @matched_port_entry - save matched hba port entry here
6180 * @count - count of matched entries
6182 * return type of matched entry found.
 *
 * Only entries still flagged HBA_PORT_FLAG_DIRTY_PORT are considered.
 * The four comparisons below are ranked: exact address+phymask beats
 * address+sub-phymask+port-id, which beats address+sub-phymask, which
 * beats address only; a weaker match never overwrites a stronger one.
6184 static enum hba_port_matched_codes
6185 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6186 struct hba_port *port_entry,
6187 struct hba_port **matched_port_entry, int *count)
6189 struct hba_port *port_table_entry, *matched_port = NULL;
6190 enum hba_port_matched_codes matched_code = NOT_MATCHED;
6192 *matched_port_entry = NULL;
6194 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6195 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6198 if ((port_table_entry->sas_address == port_entry->sas_address)
6199 && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6200 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6201 matched_port = port_table_entry;
6205 if ((port_table_entry->sas_address == port_entry->sas_address)
6206 && (port_table_entry->phy_mask & port_entry->phy_mask)
6207 && (port_table_entry->port_id == port_entry->port_id)) {
6208 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6209 matched_port = port_table_entry;
6213 if ((port_table_entry->sas_address == port_entry->sas_address)
6214 && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6216 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6218 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6219 matched_port = port_table_entry;
6223 if (port_table_entry->sas_address == port_entry->sas_address) {
6225 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6227 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6229 matched_code = MATCHED_WITH_ADDR;
6230 matched_port = port_table_entry;
	/* NOTE(review): for address-only matches the caller needs the
	 * number of candidates (*count) to disambiguate; increment is
	 * presumably in an elided line — confirm against full source. */
6235 *matched_port_entry = matched_port;
6236 if (matched_code == MATCHED_WITH_ADDR)
6238 return matched_code;
6242 * _scsih_del_phy_part_of_anther_port - remove phy if it
6243 * is a part of anther port
6244 *@ioc: per adapter object
6245 *@port_table: port table after reset
6246 *@index: hba port entry index
6247 *@port_count: number of ports available after host reset
6248 *@offset: HBA phy bit offset
 *
 * If the phy at @offset now appears in some OTHER temporary port entry,
 * detach it from its current transport port; otherwise claim the phy for
 * entry @index by setting its bit in that entry's phy_mask.
6252 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6253 struct hba_port *port_table,
6254 int index, u8 port_count, int offset)
6256 struct _sas_node *sas_node = &ioc->sas_hba;
6259 for (i = 0; i < port_count; i++) {
	/* presumably i == index is skipped by an elided guard — verify */
6263 if (port_table[i].phy_mask & (1 << offset)) {
6264 mpt3sas_transport_del_phy_from_an_existing_port(
6265 ioc, sas_node, &sas_node->phy[offset]);
6271 port_table[index].phy_mask |= (1 << offset);
6275 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6277 *@ioc: per adapter object
6278 *@hba_port_entry: hba port table entry
6279 *@port_table: temporary port table
6280 *@index: hba port entry index
6281 *@port_count: number of ports available after host reset
 *
 * Reconciles one existing hba_port with its post-reset counterpart:
 * the XOR below yields the phys that changed membership, and each such
 * phy is either removed (no longer in the new mask) or (re)attached to
 * the transport port at the port's SAS address.
6285 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6286 struct hba_port *hba_port_entry, struct hba_port *port_table,
6287 int index, int port_count)
6289 u32 phy_mask, offset = 0;
6290 struct _sas_node *sas_node = &ioc->sas_hba;
	/* bits set here = phys that differ between old and new port */
6292 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6294 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6295 if (phy_mask & (1 << offset)) {
6296 if (!(port_table[index].phy_mask & (1 << offset))) {
6297 _scsih_del_phy_part_of_anther_port(
6298 ioc, port_table, index, port_count,
	/* phy gained by this port: detach from any stale port first */
6302 if (sas_node->phy[offset].phy_belongs_to_port)
6303 mpt3sas_transport_del_phy_from_an_existing_port(
6304 ioc, sas_node, &sas_node->phy[offset]);
6305 mpt3sas_transport_add_phy_to_an_existing_port(
6306 ioc, sas_node, &sas_node->phy[offset],
6307 hba_port_entry->sas_address,
6314 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6315 * @ioc: per adapter object
 *
 * Frees every virtual_phy still flagged MPT_VPHY_FLAG_DIRTY_PHY after
 * the post-reset rebinding pass, clearing its bits from the owning
 * port's vphys_mask. A port left with no vphys and no SAS address is
 * itself marked dirty for later removal.
6320 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6322 struct hba_port *port, *port_next;
6323 struct virtual_phy *vphy, *vphy_next;
6325 list_for_each_entry_safe(port, port_next,
6326 &ioc->port_table_list, list) {
6327 if (!port->vphys_mask)
6329 list_for_each_entry_safe(vphy, vphy_next,
6330 &port->vphys_list, list) {
6331 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6332 drsprintk(ioc, ioc_info(ioc,
6333 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6334 vphy, port->port_id,
6336 port->vphys_mask &= ~vphy->phy_mask;
6337 list_del(&vphy->list);
6341 if (!port->vphys_mask && !port->sas_address)
6342 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6347 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6349 *@ioc: per adapter object
 *
 * Unlinks (and presumably frees, in an elided line) every hba_port that
 * is still flagged dirty after reset reconciliation, except ports
 * flagged HBA_PORT_FLAG_NEW_PORT, which were just discovered.
6353 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6355 struct hba_port *port, *port_next;
6357 list_for_each_entry_safe(port, port_next,
6358 &ioc->port_table_list, list) {
6359 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6360 port->flags & HBA_PORT_FLAG_NEW_PORT)
6363 drsprintk(ioc, ioc_info(ioc,
6364 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6365 port, port->port_id, port->phy_mask));
6366 list_del(&port->list);
6372 * _scsih_sas_port_refresh - Update HBA port table after host reset
6373 * @ioc: per adapter object
 *
 * Builds a temporary port table from the post-reset SASIOUnitPage0,
 * marks every existing hba_port dirty, then matches each new entry
 * against the old table via _scsih_look_and_get_matched_port_entry()
 * and updates the surviving entries in place (port id, phy_mask,
 * clearing the dirty flag).
6376 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6379 struct hba_port *port_table;
6380 struct hba_port *port_table_entry;
6381 struct hba_port *port_entry = NULL;
6382 int i, j, count = 0, lcount = 0;
6386 drsprintk(ioc, ioc_info(ioc,
6387 "updating ports for sas_host(0x%016llx)\n",
6388 (unsigned long long)ioc->sas_hba.sas_address));
6390 port_table = kcalloc(ioc->sas_hba.num_phys,
6391 sizeof(struct hba_port), GFP_KERNEL);
6395 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6399 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6400 for (j = 0; j < port_count; j++)
6401 drsprintk(ioc, ioc_info(ioc,
6402 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6403 port_table[j].port_id,
6404 port_table[j].phy_mask, port_table[j].sas_address));
	/* pessimistically dirty every old entry; matches un-dirty below */
6406 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6407 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6409 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6410 port_table_entry = NULL;
6411 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6412 drsprintk(ioc, ioc_info(ioc,
6413 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6414 port_table_entry->port_id,
6415 port_table_entry->phy_mask,
6416 port_table_entry->sas_address));
6419 for (j = 0; j < port_count; j++) {
6420 ret = _scsih_look_and_get_matched_port_entry(ioc,
6421 &port_table[j], &port_entry, &count);
6423 drsprintk(ioc, ioc_info(ioc,
6424 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6425 port_table[j].sas_address,
6426 port_table[j].port_id));
6431 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6432 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6433 _scsih_add_or_del_phys_from_existing_port(ioc,
6434 port_entry, port_table, j, port_count);
6436 case MATCHED_WITH_ADDR:
6437 sas_addr = port_table[j].sas_address;
	/* lcount counts new entries sharing this SAS address; the
	 * increment line is elided from this excerpt. */
6438 for (i = 0; i < port_count; i++) {
6439 if (port_table[i].sas_address == sas_addr)
	/* ambiguous match (same address on several ports): skip */
6443 if (count > 1 || lcount > 1)
6446 _scsih_add_or_del_phys_from_existing_port(ioc,
6447 port_entry, port_table, j, port_count);
6453 if (port_entry->port_id != port_table[j].port_id)
6454 port_entry->port_id = port_table[j].port_id;
6455 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6456 port_entry->phy_mask = port_table[j].phy_mask;
6459 port_table_entry = NULL;
6463 * _scsih_alloc_vphy - allocate virtual_phy object
6464 * @ioc: per adapter object
6465 * @port_id: Port ID number
6466 * @phy_num: HBA Phy number
6468 * Returns allocated virtual_phy object.
 *
 * Looks up (or, when absent, allocates and links) the virtual_phy for
 * @phy_num under the hba_port matching @port_id, and records the phy in
 * both the port's vphys_mask and the vphy's own phy_mask.
6470 static struct virtual_phy *
6471 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6473 struct virtual_phy *vphy;
6474 struct hba_port *port;
6476 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6480 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
	/* presumably reached only when the lookup returned NULL — the
	 * guard line is elided from this excerpt. */
6482 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6487 * Enable bit corresponding to HBA phy number on its
6488 * parent hba_port object's vphys_mask field.
6490 port->vphys_mask |= (1 << phy_num);
6491 vphy->phy_mask |= (1 << phy_num);
6493 INIT_LIST_HEAD(&port->vphys_list);
6494 list_add_tail(&vphy->list, &port->vphys_list);
6497 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6498 vphy, port->port_id, phy_num);
6504 * _scsih_sas_host_refresh - refreshing sas host object contents
6505 * @ioc: per adapter object
6508 * During port enable, fw will send topology events for every device. Its
6509 * possible that the handles may change from the previous setting, so this
6510 * code keeping handles updating if changed.
 *
 * Re-reads SASIOUnitPage0 and, per phy: refreshes the controller dev
 * handle, creates any hba_port not yet in the port table (flagged NEW
 * during shost recovery), allocates a virtual_phy for hot-added vSES
 * phys, and pushes updated link state to the SAS transport layer.
6513 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6518 Mpi2ConfigReply_t mpi_reply;
6519 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6520 u16 attached_handle;
6521 u8 link_rate, port_id;
6522 struct hba_port *port;
6523 Mpi2SasPhyPage0_t phy_pg0;
6526 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6527 (u64)ioc->sas_hba.sas_address));
6529 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6530 * sizeof(Mpi2SasIOUnit0PhyData_t));
6531 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6532 if (!sas_iounit_pg0) {
6533 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6534 __FILE__, __LINE__, __func__);
6538 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6539 sas_iounit_pg0, sz)) != 0)
6541 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6542 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6544 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6545 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
6547 ioc->sas_hba.handle = le16_to_cpu(
6548 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6549 port_id = sas_iounit_pg0->PhyData[i].Port;
6550 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6551 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6555 port->port_id = port_id;
6557 "hba_port entry: %p, port: %d is added to hba_port list\n",
6558 port, port->port_id);
	/* ports discovered mid-recovery must survive the later
	 * dirty-port sweep, hence the NEW_PORT flag */
6559 if (ioc->shost_recovery)
6560 port->flags = HBA_PORT_FLAG_NEW_PORT;
6561 list_add_tail(&port->list, &ioc->port_table_list);
6564 * Check whether current Phy belongs to HBA vSES device or not.
6566 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6567 MPI2_SAS_DEVICE_INFO_SEP &&
6568 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
6569 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6572 "failure at %s:%d/%s()!\n",
6573 __FILE__, __LINE__, __func__);
6576 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6577 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6580 * Allocate a virtual_phy object for vSES device, if
6581 * this vSES device is hot added.
6583 if (!_scsih_alloc_vphy(ioc, port_id, i))
6585 ioc->sas_hba.phy[i].hba_vphy = 1;
6588 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6589 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
	/* report at least 1.5G for any attached device so the
	 * transport layer treats the link as usable */
6591 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6592 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6593 ioc->sas_hba.phy[i].port =
6594 mpt3sas_get_port_by_id(ioc, port_id, 0);
6595 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6596 attached_handle, i, link_rate,
6597 ioc->sas_hba.phy[i].port);
6600 kfree(sas_iounit_pg0);
6604 * _scsih_sas_host_add - create sas host object
6605 * @ioc: per adapter object
6607 * Creating host side data object, stored in ioc->sas_hba
 *
 * One-time discovery of the HBA itself: reads SAS IO Unit pages 0/1 to
 * size the phy array and learn the missing-device delays, builds the
 * hba_port table, registers each host phy with the SAS transport, then
 * fills in the host's own SAS address/enclosure info from SAS Device
 * Page 0.
6610 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6613 Mpi2ConfigReply_t mpi_reply;
6614 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6615 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6616 Mpi2SasPhyPage0_t phy_pg0;
6617 Mpi2SasDevicePage0_t sas_device_pg0;
6618 Mpi2SasEnclosurePage0_t enclosure_pg0;
6621 u8 device_missing_delay;
6622 u8 num_phys, port_id;
6623 struct hba_port *port;
6625 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6627 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6628 __FILE__, __LINE__, __func__);
6631 ioc->sas_hba.phy = kcalloc(num_phys,
6632 sizeof(struct _sas_phy), GFP_KERNEL);
6633 if (!ioc->sas_hba.phy) {
6634 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6635 __FILE__, __LINE__, __func__);
6638 ioc->sas_hba.num_phys = num_phys;
6640 /* sas_iounit page 0 */
6641 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
6642 sizeof(Mpi2SasIOUnit0PhyData_t));
6643 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6644 if (!sas_iounit_pg0) {
6645 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6646 __FILE__, __LINE__, __func__);
6649 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6650 sas_iounit_pg0, sz))) {
6651 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6652 __FILE__, __LINE__, __func__);
6655 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6656 MPI2_IOCSTATUS_MASK;
6657 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6658 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6659 __FILE__, __LINE__, __func__);
6663 /* sas_iounit page 1 */
6664 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
6665 sizeof(Mpi2SasIOUnit1PhyData_t));
6666 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6667 if (!sas_iounit_pg1) {
6668 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6669 __FILE__, __LINE__, __func__);
6672 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6673 sas_iounit_pg1, sz))) {
6674 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6675 __FILE__, __LINE__, __func__);
6678 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6679 MPI2_IOCSTATUS_MASK;
6680 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6681 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6682 __FILE__, __LINE__, __func__);
6686 ioc->io_missing_delay =
6687 sas_iounit_pg1->IODeviceMissingDelay;
6688 device_missing_delay =
6689 sas_iounit_pg1->ReportDeviceMissingDelay;
	/* UNIT_16 flag means the delay field is in units of 16 seconds */
6690 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6691 ioc->device_missing_delay = (device_missing_delay &
6692 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6694 ioc->device_missing_delay = device_missing_delay &
6695 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6697 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
6698 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6699 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6701 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6702 __FILE__, __LINE__, __func__);
6705 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6706 MPI2_IOCSTATUS_MASK;
6707 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6708 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6709 __FILE__, __LINE__, __func__);
6714 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6715 PhyData[0].ControllerDevHandle);
6717 port_id = sas_iounit_pg0->PhyData[i].Port;
6718 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6719 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6723 port->port_id = port_id;
6725 "hba_port entry: %p, port: %d is added to hba_port list\n",
6726 port, port->port_id);
6727 list_add_tail(&port->list,
6728 &ioc->port_table_list);
6732 * Check whether current Phy belongs to HBA vSES device or not.
6734 if ((le32_to_cpu(phy_pg0.PhyInfo) &
6735 MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6736 (phy_pg0.NegotiatedLinkRate >> 4) >=
6737 MPI2_SAS_NEG_LINK_RATE_1_5) {
6739 * Allocate a virtual_phy object for vSES device.
6741 if (!_scsih_alloc_vphy(ioc, port_id, i))
6743 ioc->sas_hba.phy[i].hba_vphy = 1;
6746 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6747 ioc->sas_hba.phy[i].phy_id = i;
6748 ioc->sas_hba.phy[i].port =
6749 mpt3sas_get_port_by_id(ioc, port_id, 0);
6750 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6751 phy_pg0, ioc->sas_hba.parent_dev);
6753 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6754 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6755 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6756 __FILE__, __LINE__, __func__);
6759 ioc->sas_hba.enclosure_handle =
6760 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6761 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6762 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6763 ioc->sas_hba.handle,
6764 (u64)ioc->sas_hba.sas_address,
6765 ioc->sas_hba.num_phys);
6767 if (ioc->sas_hba.enclosure_handle) {
6768 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6769 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6770 ioc->sas_hba.enclosure_handle)))
6771 ioc->sas_hba.enclosure_logical_id =
6772 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6776 kfree(sas_iounit_pg1);
6777 kfree(sas_iounit_pg0);
6781 * _scsih_expander_add - creating expander object
6782 * @ioc: per adapter object
6783 * @handle: expander handle
6785 * Creating expander object, stored in ioc->sas_expander_list.
6787 * Return: 0 for success, else error.
 *
 * Reads Expander Page 0, recursing to add an unknown parent first
 * (topology events can arrive out of order), then allocates the
 * _sas_node, registers a transport port plus one transport phy per
 * expander phy (from Expander Page 1), and links the node into the
 * driver's expander list.
6790 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6792 struct _sas_node *sas_expander;
6793 struct _enclosure_node *enclosure_dev;
6794 Mpi2ConfigReply_t mpi_reply;
6795 Mpi2ExpanderPage0_t expander_pg0;
6796 Mpi2ExpanderPage1_t expander_pg1;
6799 u64 sas_address, sas_address_parent = 0;
6801 unsigned long flags;
6802 struct _sas_port *mpt3sas_port = NULL;
	/* no topology changes while the host or PCI layer is recovering */
6810 if (ioc->shost_recovery || ioc->pci_error_recovery)
6813 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6814 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6815 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6816 __FILE__, __LINE__, __func__);
6820 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6821 MPI2_IOCSTATUS_MASK;
6822 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6823 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6824 __FILE__, __LINE__, __func__);
6828 /* handle out of order topology events */
6829 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6830 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6832 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6833 __FILE__, __LINE__, __func__);
6837 port_id = expander_pg0.PhysicalPort;
	/* parent is another expander: make sure it exists first (recursion) */
6838 if (sas_address_parent != ioc->sas_hba.sas_address) {
6839 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6840 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6842 mpt3sas_get_port_by_id(ioc, port_id, 0));
6843 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6844 if (!sas_expander) {
6845 rc = _scsih_expander_add(ioc, parent_handle);
6851 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6852 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6853 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6854 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6855 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6860 sas_expander = kzalloc(sizeof(struct _sas_node),
6862 if (!sas_expander) {
6863 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6864 __FILE__, __LINE__, __func__);
6868 sas_expander->handle = handle;
6869 sas_expander->num_phys = expander_pg0.NumPhys;
6870 sas_expander->sas_address_parent = sas_address_parent;
6871 sas_expander->sas_address = sas_address;
6872 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6873 if (!sas_expander->port) {
6874 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6875 __FILE__, __LINE__, __func__);
6880 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6881 handle, parent_handle,
6882 (u64)sas_expander->sas_address, sas_expander->num_phys);
6884 if (!sas_expander->num_phys)
6886 sas_expander->phy = kcalloc(sas_expander->num_phys,
6887 sizeof(struct _sas_phy), GFP_KERNEL);
6888 if (!sas_expander->phy) {
6889 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6890 __FILE__, __LINE__, __func__);
6895 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6896 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6897 sas_address_parent, sas_expander->port);
6898 if (!mpt3sas_port) {
6899 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6900 __FILE__, __LINE__, __func__);
6904 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6905 sas_expander->rphy = mpt3sas_port->rphy;
6907 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6908 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6909 &expander_pg1, i, handle))) {
6910 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6911 __FILE__, __LINE__, __func__);
6915 sas_expander->phy[i].handle = handle;
6916 sas_expander->phy[i].phy_id = i;
6917 sas_expander->phy[i].port =
6918 mpt3sas_get_port_by_id(ioc, port_id, 0);
6920 if ((mpt3sas_transport_add_expander_phy(ioc,
6921 &sas_expander->phy[i], expander_pg1,
6922 sas_expander->parent_dev))) {
6923 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6924 __FILE__, __LINE__, __func__);
6930 if (sas_expander->enclosure_handle) {
6932 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6933 sas_expander->enclosure_handle);
6935 sas_expander->enclosure_logical_id =
6936 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6939 _scsih_expander_node_add(ioc, sas_expander);
	/* error unwind: tear down the transport port, then free the node */
6945 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6946 sas_address_parent, sas_expander->port);
6947 kfree(sas_expander);
6952 * mpt3sas_expander_remove - removing expander object
6953 * @ioc: per adapter object
6954 * @sas_address: expander sas_address
 * @port: hba_port the expander is attached through
 *
 * Looks up the expander node under sas_node_lock and, when found,
 * removes it (and presumably its children) via
 * _scsih_expander_node_remove(). No-op during host recovery.
6957 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6958 struct hba_port *port)
6960 struct _sas_node *sas_expander;
6961 unsigned long flags;
6963 if (ioc->shost_recovery)
6969 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6970 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6972 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6974 _scsih_expander_node_remove(ioc, sas_expander);
6978 * _scsih_done - internal SCSI_IO callback handler.
6979 * @ioc: per adapter object
6980 * @smid: system request message index
6981 * @msix_index: MSIX table index supplied by the OS
6982 * @reply: reply message frame(lower 32bit addr)
6984 * Callback handler when sending internal generated SCSI_IO.
6985 * The callback index passed is `ioc->scsih_cb_idx`
6987 * Return: 1 meaning mf should be freed from _base_interrupt
6988 * 0 means the mf is freed from this function.
/*
 * Completion callback for internally generated SCSI_IO requests
 * (registered at ioc->scsih_cb_idx).  Copies the reply frame into
 * ioc->scsih_cmds.reply, updates the command status bits and wakes the
 * waiter via complete().
 * NOTE(review): the guard-clause bodies (returns) and the NULL check on
 * mpi_reply appear elided in this extract — verify against the full file.
 */
6991 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6993 MPI2DefaultReply_t *mpi_reply;
6995 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* discard stale completions: slot not in use or smid mismatch */
6996 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
6998 if (ioc->scsih_cmds.smid != smid)
7000 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
/* MsgLength is in 32-bit dwords, hence the *4 byte count */
7002 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7003 mpi_reply->MsgLength*4);
7004 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7006 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7007 complete(&ioc->scsih_cmds.done);
7014 #define MPT3_MAX_LUNS (255)
7018 * _scsih_check_access_status - check access flags
7019 * @ioc: per adapter object
7020 * @sas_address: sas address
7021 * @handle: sas device handle
7022 * @access_status: errors returned during discovery of the device
7024 * Return: 0 for success, else failure
/*
 * Map a SAS Device Page 0 AccessStatus code to a human-readable string
 * and log discovery errors.  "no errors" and "needs initialization" are
 * treated as success; everything else is reported via ioc_err().
 * NOTE(review): the per-case break statements, the default case, and the
 * success/failure return paths are elided in this extract.
 */
7027 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7028 u16 handle, u8 access_status)
7033 switch (access_status) {
7034 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7035 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7038 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7039 desc = "sata capability failed";
7041 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7042 desc = "sata affiliation conflict";
7044 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7045 desc = "route not addressable";
7047 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7048 desc = "smp error not addressable";
7050 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7051 desc = "device blocked";
/* all SATA-initialization sub-failures collapse to one description */
7053 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7054 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7055 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7056 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7057 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7058 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7059 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7060 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7061 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7062 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7063 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7064 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7065 desc = "sata initialization failed";
7075 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7076 desc, (u64)sas_address, handle);
7081 * _scsih_check_device - checking device responsiveness
7082 * @ioc: per adapter object
7083 * @parent_sas_address: sas address of parent expander or sas host
7084 * @handle: attached device handle
7085 * @phy_number: phy number
7086 * @link_rate: new link rate
/*
 * Re-validate a known end device after a link-rate/phy change: re-reads
 * SAS Device Page 0 for @handle, refreshes the cached handle, enclosure
 * level/handle, connector name and chassis-slot data on the matching
 * _sas_device object, then unblocks I/O to it.
 * Runs mostly under sas_device_lock; the unblock happens after the lock
 * is dropped.
 * NOTE(review): many early-return bodies, goto labels and closing braces
 * are elided in this extract — the control flow shown here is partial.
 */
7089 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7090 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7092 Mpi2ConfigReply_t mpi_reply;
7093 Mpi2SasDevicePage0_t sas_device_pg0;
7094 struct _sas_device *sas_device = NULL;
7095 struct _enclosure_node *enclosure_dev = NULL;
7097 unsigned long flags;
7099 struct scsi_target *starget;
7100 struct MPT3SAS_TARGET *sas_target_priv_data;
7102 struct hba_port *port;
/* fetch the current Device Page 0 for this handle from the IOC */
7104 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7105 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7108 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7109 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7112 /* wide port handling ~ we need only handle device once for the phy that
7113 * is matched in sas device page zero
7115 if (phy_number != sas_device_pg0.PhyNum)
7118 /* check if this is end device */
7119 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7120 if (!(_scsih_is_end_device(device_info)))
7123 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7124 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7125 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
/* look up the cached device object; takes a reference on success */
7128 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
/* firmware may reassign handles across resets — resync cached state */
7134 if (unlikely(sas_device->handle != handle)) {
7135 starget = sas_device->starget;
7136 sas_target_priv_data = starget->hostdata;
7137 starget_printk(KERN_INFO, starget,
7138 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7139 sas_device->handle, handle);
7140 sas_target_priv_data->handle = handle;
7141 sas_device->handle = handle;
7142 if (le16_to_cpu(sas_device_pg0.Flags) &
7143 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7144 sas_device->enclosure_level =
7145 sas_device_pg0.EnclosureLevel;
/* ConnectorName is a fixed 4-byte field; NUL-terminate the copy */
7146 memcpy(sas_device->connector_name,
7147 sas_device_pg0.ConnectorName, 4);
7148 sas_device->connector_name[4] = '\0';
7150 sas_device->enclosure_level = 0;
7151 sas_device->connector_name[0] = '\0';
/* refresh enclosure handle and (if known) logical id / chassis slot */
7154 sas_device->enclosure_handle =
7155 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7156 sas_device->is_chassis_slot_valid = 0;
7157 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7158 sas_device->enclosure_handle);
7159 if (enclosure_dev) {
7160 sas_device->enclosure_logical_id =
7161 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7162 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7163 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7164 sas_device->is_chassis_slot_valid = 1;
7165 sas_device->chassis_slot =
7166 enclosure_dev->pg0.ChassisSlot;
7171 /* check if device is present */
7172 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7173 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7174 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7179 /* check if there were any issues with discovery */
7180 if (_scsih_check_access_status(ioc, sas_address, handle,
7181 sas_device_pg0.AccessStatus))
/* device validated — drop the lock, then resume I/O to it */
7184 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7185 _scsih_ublock_io_device(ioc, sas_address, port);
7188 sas_device_put(sas_device);
/* error path: release lock and the reference taken by the lookup */
7192 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7194 sas_device_put(sas_device);
7198 * _scsih_add_device - creating sas device object
7199 * @ioc: per adapter object
7200 * @handle: sas device handle
7201 * @phy_num: phy number end device attached to
7202 * @is_pd: is this hidden raid component
7204 * Creating end device object, stored in ioc->sas_device_list.
7206 * Return: 0 for success, non-zero for failure.
/*
 * Create and register a new SAS end-device object for @handle.
 * Reads SAS Device Page 0, validates presence and access status, skips
 * devices already known by address/port, allocates a refcounted
 * _sas_device, populates it (parent address, enclosure, connector,
 * fast-path flag, hba_port), and adds it to the init list (during
 * discovery) or the live list.
 * NOTE(review): error-return bodies, some assignment left-hand sides and
 * closing braces are elided in this extract — control flow is partial.
 */
7209 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7212 Mpi2ConfigReply_t mpi_reply;
7213 Mpi2SasDevicePage0_t sas_device_pg0;
7214 struct _sas_device *sas_device;
7215 struct _enclosure_node *enclosure_dev = NULL;
/* read Device Page 0 for this handle */
7221 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7222 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7223 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7224 __FILE__, __LINE__, __func__);
7228 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7229 MPI2_IOCSTATUS_MASK;
7230 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7231 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7232 __FILE__, __LINE__, __func__);
7236 /* check if this is end device */
7237 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7238 if (!(_scsih_is_end_device(device_info)))
/* mark OS-level device registration as pending for this handle */
7240 set_bit(handle, ioc->pend_os_device_add);
7241 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7243 /* check if device is present */
7244 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7245 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7246 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7251 /* check if there were any issues with discovery */
7252 if (_scsih_check_access_status(ioc, sas_address, handle,
7253 sas_device_pg0.AccessStatus))
/* already known at this address/port? then nothing to add */
7256 port_id = sas_device_pg0.PhysicalPort;
7257 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7258 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7260 clear_bit(handle, ioc->pend_os_device_add);
7261 sas_device_put(sas_device);
/* resolve the enclosure object, if the device reports one */
7265 if (sas_device_pg0.EnclosureHandle) {
7267 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7268 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7269 if (enclosure_dev == NULL)
7270 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7271 sas_device_pg0.EnclosureHandle);
7274 sas_device = kzalloc(sizeof(struct _sas_device),
7277 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7278 __FILE__, __LINE__, __func__);
7282 kref_init(&sas_device->refcount);
7283 sas_device->handle = handle;
/* parent address lookup failure is logged but not fatal here */
7284 if (_scsih_get_sas_address(ioc,
7285 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7286 &sas_device->sas_address_parent) != 0)
7287 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7288 __FILE__, __LINE__, __func__);
7289 sas_device->enclosure_handle =
7290 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7291 if (sas_device->enclosure_handle != 0)
7293 le16_to_cpu(sas_device_pg0.Slot);
7294 sas_device->device_info = device_info;
7295 sas_device->sas_address = sas_address;
7296 sas_device->phy = sas_device_pg0.PhyNum;
7297 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7298 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7299 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7300 if (!sas_device->port) {
7301 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7302 __FILE__, __LINE__, __func__);
7306 if (le16_to_cpu(sas_device_pg0.Flags)
7307 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7308 sas_device->enclosure_level =
7309 sas_device_pg0.EnclosureLevel;
/* ConnectorName is a fixed 4-byte field; NUL-terminate the copy */
7310 memcpy(sas_device->connector_name,
7311 sas_device_pg0.ConnectorName, 4);
7312 sas_device->connector_name[4] = '\0';
7314 sas_device->enclosure_level = 0;
7315 sas_device->connector_name[0] = '\0';
7317 /* get enclosure_logical_id & chassis_slot*/
7318 sas_device->is_chassis_slot_valid = 0;
7319 if (enclosure_dev) {
7320 sas_device->enclosure_logical_id =
7321 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7322 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7323 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7324 sas_device->is_chassis_slot_valid = 1;
7325 sas_device->chassis_slot =
7326 enclosure_dev->pg0.ChassisSlot;
7330 /* get device name */
7331 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
/* during initial discovery the device goes on the init list instead */
7333 if (ioc->wait_for_discovery_to_complete)
7334 _scsih_sas_device_init_add(ioc, sas_device);
7336 _scsih_sas_device_add(ioc, sas_device);
7339 sas_device_put(sas_device);
7344 * _scsih_remove_device - removing sas device object
7345 * @ioc: per adapter object
7346 * @sas_device: the sas_device object
/*
 * Tear down a SAS end device: turn off the IBM PFA LED if it was lit,
 * mark the target deleted, unblock any queued I/O so it can fail out,
 * invalidate the cached handle, and remove the transport port (unless
 * drives are hidden, e.g. behind a RAID volume).
 * NOTE(review): dewtprintk() wrappers and some argument lines are elided
 * in this extract.
 */
7349 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7350 struct _sas_device *sas_device)
7352 struct MPT3SAS_TARGET *sas_target_priv_data;
/* IBM OEM boards: extinguish the predictive-failure LED on removal */
7354 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7355 (sas_device->pfa_led_on)) {
7356 _scsih_turn_off_pfa_led(ioc, sas_device);
7357 sas_device->pfa_led_on = 0;
7361 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7363 sas_device->handle, (u64)sas_device->sas_address));
7365 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
/* mark deleted first so new I/O is rejected, then release queued I/O */
7368 if (sas_device->starget && sas_device->starget->hostdata) {
7369 sas_target_priv_data = sas_device->starget->hostdata;
7370 sas_target_priv_data->deleted = 1;
7371 _scsih_ublock_io_device(ioc, sas_device->sas_address,
7373 sas_target_priv_data->handle =
7374 MPT3SAS_INVALID_DEVICE_HANDLE;
7377 if (!ioc->hide_drives)
7378 mpt3sas_transport_port_remove(ioc,
7379 sas_device->sas_address,
7380 sas_device->sas_address_parent,
7383 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7384 sas_device->handle, (u64)sas_device->sas_address);
7386 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7389 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7391 sas_device->handle, (u64)sas_device->sas_address));
7392 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7397 * _scsih_sas_topology_change_event_debug - debug for topology event
7398 * @ioc: per adapter object
7399 * @event_data: event data payload
/*
 * Pretty-print a SAS topology change list event: the expander status,
 * then one line per PHY entry with its reason code and the new/old link
 * rates (packed as nibbles in PHY[i].LinkRate).
 * Pure logging — no state is modified.
 * NOTE(review): break statements and the skip of zero-handle entries are
 * elided in this extract.
 */
7403 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7404 Mpi2EventDataSasTopologyChangeList_t *event_data)
7410 char *status_str = NULL;
7411 u8 link_rate, prev_link_rate;
7413 switch (event_data->ExpStatus) {
7414 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7417 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7418 status_str = "remove";
7420 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7422 status_str = "responding";
7424 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7425 status_str = "remove delay";
7428 status_str = "unknown status";
7431 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7432 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7433 "start_phy(%02d), count(%d)\n",
7434 le16_to_cpu(event_data->ExpanderDevHandle),
7435 le16_to_cpu(event_data->EnclosureHandle),
7436 event_data->StartPhyNum, event_data->NumEntries);
7437 for (i = 0; i < event_data->NumEntries; i++) {
7438 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7441 phy_number = event_data->StartPhyNum + i;
7442 reason_code = event_data->PHY[i].PhyStatus &
7443 MPI2_EVENT_SAS_TOPO_RC_MASK;
7444 switch (reason_code) {
7445 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7446 status_str = "target add";
7448 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7449 status_str = "target remove";
7451 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7452 status_str = "delay target remove";
7454 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7455 status_str = "link rate change";
7457 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7458 status_str = "target responding";
7461 status_str = "unknown";
/* LinkRate: high nibble = current rate, low nibble = previous rate */
7464 link_rate = event_data->PHY[i].LinkRate >> 4;
7465 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7466 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7467 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7468 handle, status_str, link_rate, prev_link_rate);
7474 * _scsih_sas_topology_change_event - handle topology changes
7475 * @ioc: per adapter object
7476 * @fw_event: The fw_event_work object
/*
 * Worker-context handler for a SAS topology change list event.
 * Optionally adds the SAS host / refreshes it, handles expander add,
 * resolves the parent (expander or HBA) to get its SAS address, port and
 * phy count, then walks each PHY entry: updating links on rate change,
 * adding targets, or removing devices by handle.  Finally removes the
 * expander itself when the event says it stopped responding.
 * NOTE(review): early-return bodies, continue/break statements and some
 * closing braces are elided in this extract — the per-entry control flow
 * is partial.
 */
7481 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7482 struct fw_event_work *fw_event)
7485 u16 parent_handle, handle;
7487 u8 phy_number, max_phys;
7488 struct _sas_node *sas_expander;
7490 unsigned long flags;
7491 u8 link_rate, prev_link_rate;
7492 struct hba_port *port;
7493 Mpi2EventDataSasTopologyChangeList_t *event_data =
7494 (Mpi2EventDataSasTopologyChangeList_t *)
7495 fw_event->event_data;
7497 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7498 _scsih_sas_topology_change_event_debug(ioc, event_data);
/* nothing to do while the host is resetting or going away */
7500 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
/* lazily add the SAS host object on first event, else refresh it */
7503 if (!ioc->sas_hba.num_phys)
7504 _scsih_sas_host_add(ioc);
7506 _scsih_sas_host_refresh(ioc);
7508 if (fw_event->ignore) {
7509 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7513 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7514 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7516 /* handle expander add */
7517 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7518 if (_scsih_expander_add(ioc, parent_handle) != 0)
/* resolve parent: an expander, or the HBA itself for direct-attach */
7521 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7522 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7525 sas_address = sas_expander->sas_address;
7526 max_phys = sas_expander->num_phys;
7527 port = sas_expander->port;
7528 } else if (parent_handle < ioc->sas_hba.num_phys) {
7529 sas_address = ioc->sas_hba.sas_address;
7530 max_phys = ioc->sas_hba.num_phys;
7532 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7535 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7537 /* handle siblings events */
7538 for (i = 0; i < event_data->NumEntries; i++) {
/* fw_event->ignore can be set asynchronously; recheck each iteration */
7539 if (fw_event->ignore) {
7541 ioc_info(ioc, "ignoring expander event\n"));
7544 if (ioc->remove_host || ioc->pci_error_recovery)
7546 phy_number = event_data->StartPhyNum + i;
7547 if (phy_number >= max_phys)
7549 reason_code = event_data->PHY[i].PhyStatus &
7550 MPI2_EVENT_SAS_TOPO_RC_MASK;
/* vacant phys carry no device unless it is a removal notification */
7551 if ((event_data->PHY[i].PhyStatus &
7552 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7553 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7555 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
/* LinkRate: high nibble = current rate, low nibble = previous rate */
7558 link_rate = event_data->PHY[i].LinkRate >> 4;
7559 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7560 switch (reason_code) {
7561 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7563 if (ioc->shost_recovery)
7566 if (link_rate == prev_link_rate)
7569 mpt3sas_transport_update_links(ioc, sas_address,
7570 handle, phy_number, link_rate, port);
/* a rate below 1.5G means the link is down — nothing to check */
7572 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7575 _scsih_check_device(ioc, sas_address, handle,
7576 phy_number, link_rate);
7578 if (!test_bit(handle, ioc->pend_os_device_add))
7583 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7585 if (ioc->shost_recovery)
7588 mpt3sas_transport_update_links(ioc, sas_address,
7589 handle, phy_number, link_rate, port);
7591 _scsih_add_device(ioc, handle, phy_number, 0);
7594 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7596 _scsih_device_remove_by_handle(ioc, handle);
7601 /* handle expander removal */
7602 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7604 mpt3sas_expander_remove(ioc, sas_address, port);
7610 * _scsih_sas_device_status_change_event_debug - debug for device event
7612 * @event_data: event data payload
/*
 * Pretty-print a SAS device status change event: map the reason code to
 * a description and log it with handle, SAS address and task tag.  SMART
 * data events additionally log ASC/ASCQ.  Pure logging — no state change.
 * NOTE(review): per-case break statements are elided in this extract.
 */
7616 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7617 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7619 char *reason_str = NULL;
7621 switch (event_data->ReasonCode) {
7622 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7623 reason_str = "smart data";
7625 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7626 reason_str = "unsupported device discovered";
7628 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7629 reason_str = "internal device reset";
7631 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7632 reason_str = "internal task abort";
7634 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7635 reason_str = "internal task abort set";
7637 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7638 reason_str = "internal clear task set";
7640 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7641 reason_str = "internal query task";
7643 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7644 reason_str = "sata init failure";
7646 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7647 reason_str = "internal device reset complete";
7649 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7650 reason_str = "internal task abort complete";
7652 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7653 reason_str = "internal async notification";
7655 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7656 reason_str = "expander reduced functionality";
7658 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7659 reason_str = "expander reduced functionality complete";
7662 reason_str = "unknown reason";
7665 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7666 reason_str, le16_to_cpu(event_data->DevHandle),
7667 (u64)le64_to_cpu(event_data->SASAddress),
7668 le16_to_cpu(event_data->TaskTag));
/* SMART events carry additional sense information */
7669 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7670 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7671 event_data->ASC, event_data->ASCQ);
7676 * _scsih_sas_device_status_change_event - handle device status change
7677 * @ioc: per adapter object
7678 * @event_data: The fw event
/*
 * Handle a device status change event: set or clear the target's tm_busy
 * flag around a firmware-internal device reset so that no new task
 * management is issued while the reset is in flight.
 * NOTE(review): early-return bodies and goto labels are elided in this
 * extract.
 */
7682 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7683 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7685 struct MPT3SAS_TARGET *target_priv_data;
7686 struct _sas_device *sas_device;
7688 unsigned long flags;
7690 /* In MPI Revision K (0xC), the internal device reset complete was
7691 * implemented, so avoid setting tm_busy flag for older firmware.
7693 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
/* only reset-start/reset-complete reason codes affect tm_busy */
7696 if (event_data->ReasonCode !=
7697 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7698 event_data->ReasonCode !=
7699 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7702 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7703 sas_address = le64_to_cpu(event_data->SASAddress);
/* lookup takes a reference; released via sas_device_put() below */
7704 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7706 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7708 if (!sas_device || !sas_device->starget)
7711 target_priv_data = sas_device->starget->hostdata;
7712 if (!target_priv_data)
/* reset started -> busy; reset complete -> not busy */
7715 if (event_data->ReasonCode ==
7716 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7717 target_priv_data->tm_busy = 1;
7719 target_priv_data->tm_busy = 0;
7721 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7723 "%s tm_busy flag for handle(0x%04x)\n",
7724 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7725 target_priv_data->handle);
7729 sas_device_put(sas_device);
7731 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7736 * _scsih_check_pcie_access_status - check access flags
7737 * @ioc: per adapter object
7739 * @handle: sas device handle
7740 * @access_status: errors returned during discovery of the device
7742 * Return: 0 for success, else failure
/*
 * Map a PCIe Device Page 0 AccessStatus code to a description and log
 * NVMe discovery errors.  "no errors" and "needs initialization" count
 * as success; a blocked device is logged specially (it is still tracked
 * internally but not exposed).
 * NOTE(review): break statements, rc assignments and the return paths
 * are elided in this extract.  The "\n ll only be added" wording in the
 * blocked-device message is a runtime string and is preserved verbatim.
 */
7745 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7746 u16 handle, u8 access_status)
7751 switch (access_status) {
7752 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7753 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7756 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7757 desc = "PCIe device capability failed";
7759 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7760 desc = "PCIe device blocked";
7762 "Device with Access Status (%s): wwid(0x%016llx), "
7763 "handle(0x%04x)\n ll only be added to the internal list",
7764 desc, (u64)wwid, handle);
7767 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7768 desc = "PCIe device mem space access failed";
7770 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7771 desc = "PCIe device unsupported";
7773 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7774 desc = "PCIe device MSIx Required";
7776 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7777 desc = "PCIe device init fail max";
7779 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7780 desc = "PCIe device status unknown";
7782 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7783 desc = "nvme ready timeout";
7785 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7786 desc = "nvme device configuration unsupported";
7788 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7789 desc = "nvme identify failed";
7791 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7792 desc = "nvme qconfig failed";
7794 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7795 desc = "nvme qcreation failed";
7797 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7798 desc = "nvme eventcfg failed";
7800 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7801 desc = "nvme get feature stat failed";
7803 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7804 desc = "nvme idle timeout";
7806 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7807 desc = "nvme failure status";
/* unrecognized status: log the raw code instead of a description */
7810 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7811 access_status, (u64)wwid, handle);
7818 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7819 desc, (u64)wwid, handle);
7824 * _scsih_pcie_device_remove_from_sml - removing pcie device
7825 * from SML and free up associated memory
7826 * @ioc: per adapter object
7827 * @pcie_device: the pcie_device object
/*
 * Remove a PCIe (NVMe) device from the SCSI mid-layer: mark its target
 * deleted, unblock queued I/O so it fails out, invalidate the cached
 * handle, call scsi_remove_target() (skipped for blocked devices, which
 * were never exposed), and free the cached serial number.  Enclosure and
 * connector details are logged at enter/exit under dewtprintk.
 * NOTE(review): dewtprintk wrappers and some argument lines are elided
 * in this extract.
 */
7830 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7831 struct _pcie_device *pcie_device)
7833 struct MPT3SAS_TARGET *sas_target_priv_data;
7836 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7838 pcie_device->handle, (u64)pcie_device->wwid));
7839 if (pcie_device->enclosure_handle != 0)
7841 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7843 (u64)pcie_device->enclosure_logical_id,
7844 pcie_device->slot));
7845 if (pcie_device->connector_name[0] != '\0')
7847 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7849 pcie_device->enclosure_level,
7850 pcie_device->connector_name));
/* reject new I/O first, then release anything already queued */
7852 if (pcie_device->starget && pcie_device->starget->hostdata) {
7853 sas_target_priv_data = pcie_device->starget->hostdata;
7854 sas_target_priv_data->deleted = 1;
7855 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7856 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7859 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7860 pcie_device->handle, (u64)pcie_device->wwid);
7861 if (pcie_device->enclosure_handle != 0)
7862 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7863 (u64)pcie_device->enclosure_logical_id,
7865 if (pcie_device->connector_name[0] != '\0')
7866 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7867 pcie_device->enclosure_level,
7868 pcie_device->connector_name);
/* blocked devices were never registered with the SML — skip removal */
7870 if (pcie_device->starget && (pcie_device->access_status !=
7871 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7872 scsi_remove_target(&pcie_device->starget->dev);
7874 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7876 pcie_device->handle, (u64)pcie_device->wwid));
7877 if (pcie_device->enclosure_handle != 0)
7879 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7881 (u64)pcie_device->enclosure_logical_id,
7882 pcie_device->slot));
7883 if (pcie_device->connector_name[0] != '\0')
7885 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7887 pcie_device->enclosure_level,
7888 pcie_device->connector_name));
7890 kfree(pcie_device->serial_number);
7895 * _scsih_pcie_check_device - checking device responsiveness
7896 * @ioc: per adapter object
7897 * @handle: attached device handle
/*
 * Re-validate a known PCIe (NVMe) device after an event: re-reads PCIe
 * Device Page 0 for @handle, refreshes the cached handle, access status
 * and enclosure/connector data, checks presence and access status, then
 * unblocks I/O by wwid.  Runs mostly under pcie_device_lock.
 * NOTE(review): early-return bodies, the enclosure-handle refresh lines
 * and several closing braces are elided in this extract.
 */
7900 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7902 Mpi2ConfigReply_t mpi_reply;
7903 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7905 struct _pcie_device *pcie_device;
7907 unsigned long flags;
7908 struct scsi_target *starget;
7909 struct MPT3SAS_TARGET *sas_target_priv_data;
/* fetch the current PCIe Device Page 0 for this handle */
7912 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7913 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
7916 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7917 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7920 /* check if this is end device */
7921 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7922 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
7925 wwid = le64_to_cpu(pcie_device_pg0.WWID);
7926 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
/* lookup takes a reference; released on each exit path below */
7927 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7930 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* firmware may reassign handles across resets — resync cached state */
7934 if (unlikely(pcie_device->handle != handle)) {
7935 starget = pcie_device->starget;
7936 sas_target_priv_data = starget->hostdata;
7937 pcie_device->access_status = pcie_device_pg0.AccessStatus;
7938 starget_printk(KERN_INFO, starget,
7939 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7940 pcie_device->handle, handle);
7941 sas_target_priv_data->handle = handle;
7942 pcie_device->handle = handle;
7944 if (le32_to_cpu(pcie_device_pg0.Flags) &
7945 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7946 pcie_device->enclosure_level =
7947 pcie_device_pg0.EnclosureLevel;
7948 memcpy(&pcie_device->connector_name[0],
7949 &pcie_device_pg0.ConnectorName[0], 4);
7951 pcie_device->enclosure_level = 0;
7952 pcie_device->connector_name[0] = '\0';
7956 /* check if device is present */
7957 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7958 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7959 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
7961 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7962 pcie_device_put(pcie_device);
7966 /* check if there were any issues with discovery */
7967 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7968 pcie_device_pg0.AccessStatus)) {
7969 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7970 pcie_device_put(pcie_device);
/* device validated — drop the lock/reference, then resume I/O */
7974 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7975 pcie_device_put(pcie_device);
7977 _scsih_ublock_io_device(ioc, wwid, NULL);
7983 * _scsih_pcie_add_device - creating pcie device object
7984 * @ioc: per adapter object
7985 * @handle: pcie device handle
7987 * Creating end device object, stored in ioc->pcie_device_list.
7989 * Return: 1 means queue the event later, 0 means complete the event
/*
 * Create and register a new PCIe (NVMe or PCIe-SCSI) device object for
 * @handle.  Reads PCIe Device Page 0 (and, for NVMe only, Page 2 for
 * MDTS / shutdown latency / reset timeout), validates presence and
 * access status, skips devices already known by wwid, allocates a
 * refcounted _pcie_device, populates it, and adds it to the init list
 * (during discovery) or the live list.
 * NOTE(review): error-return bodies, some assignment left-hand sides and
 * closing braces are elided in this extract — control flow is partial.
 */
7992 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7994 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7995 Mpi26PCIeDevicePage2_t pcie_device_pg2;
7996 Mpi2ConfigReply_t mpi_reply;
7997 struct _pcie_device *pcie_device;
7998 struct _enclosure_node *enclosure_dev;
/* read PCIe Device Page 0 for this handle */
8002 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8003 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8004 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8005 __FILE__, __LINE__, __func__);
8008 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8009 MPI2_IOCSTATUS_MASK;
8010 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8011 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8012 __FILE__, __LINE__, __func__);
/* mark OS-level device registration as pending for this handle */
8016 set_bit(handle, ioc->pend_os_device_add);
8017 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8019 /* check if device is present */
8020 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8021 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8022 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8027 /* check if there were any issues with discovery */
8028 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8029 pcie_device_pg0.AccessStatus))
8032 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8033 (pcie_device_pg0.DeviceInfo))))
/* already known by wwid? then nothing to add */
8036 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8038 clear_bit(handle, ioc->pend_os_device_add);
8039 pcie_device_put(pcie_device);
8043 /* PCIe Device Page 2 contains read-only information about a
8044 * specific NVMe device; therefore, this page is only
8045 * valid for NVMe devices and skip for pcie devices of type scsi.
8047 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8048 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8049 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8050 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8053 "failure at %s:%d/%s()!\n", __FILE__,
8054 __LINE__, __func__);
8058 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8059 MPI2_IOCSTATUS_MASK;
8060 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8062 "failure at %s:%d/%s()!\n", __FILE__,
8063 __LINE__, __func__);
8068 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8070 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8071 __FILE__, __LINE__, __func__);
8075 kref_init(&pcie_device->refcount);
/* PCIe targets get driver-assigned sequential ids on PCIE_CHANNEL */
8076 pcie_device->id = ioc->pcie_target_id++;
8077 pcie_device->channel = PCIE_CHANNEL;
8078 pcie_device->handle = handle;
8079 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8080 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8081 pcie_device->wwid = wwid;
8082 pcie_device->port_num = pcie_device_pg0.PortNum;
8083 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8084 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8086 pcie_device->enclosure_handle =
8087 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8088 if (pcie_device->enclosure_handle != 0)
8089 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8091 if (le32_to_cpu(pcie_device_pg0.Flags) &
8092 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8093 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8094 memcpy(&pcie_device->connector_name[0],
8095 &pcie_device_pg0.ConnectorName[0], 4);
8097 pcie_device->enclosure_level = 0;
8098 pcie_device->connector_name[0] = '\0';
8101 /* get enclosure_logical_id */
8102 if (pcie_device->enclosure_handle) {
8104 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8105 pcie_device->enclosure_handle);
8107 pcie_device->enclosure_logical_id =
8108 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8110 /* TODO -- Add device name once FW supports it */
8111 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8112 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8113 pcie_device->nvme_mdts =
8114 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8115 pcie_device->shutdown_latency =
8116 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8118 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8119 * if drive's RTD3 Entry Latency is greater then IOC's
8120 * max_shutdown_latency.
8122 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8123 ioc->max_shutdown_latency =
8124 pcie_device->shutdown_latency;
/* ControllerResetTO of 0 means unspecified — fall back to 30s */
8125 if (pcie_device_pg2.ControllerResetTO)
8126 pcie_device->reset_timeout =
8127 pcie_device_pg2.ControllerResetTO;
8129 pcie_device->reset_timeout = 30;
8131 pcie_device->reset_timeout = 30;
/* during initial discovery the device goes on the init list instead */
8133 if (ioc->wait_for_discovery_to_complete)
8134 _scsih_pcie_device_init_add(ioc, pcie_device);
8136 _scsih_pcie_device_add(ioc, pcie_device);
8138 pcie_device_put(pcie_device);
8143  * _scsih_pcie_topology_change_event_debug - debug for topology
8145  * @ioc: per adapter object
8146  * @event_data: event data payload
 *
 * Pretty-prints a PCIe topology change list event: one summary line for
 * the switch, then one line per port entry with its reason code and the
 * new/old link rates.  Debug-logging only; no state is modified.
 *
 * NOTE(review): sampled listing — break/brace lines are not visible here;
 * code left byte-identical.
8150 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8151 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8157 char *status_str = NULL;
8158 u8 link_rate, prev_link_rate;
/* Map the switch-level status to a human-readable string. */
8160 switch (event_data->SwitchStatus) {
8161 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8164 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8165 status_str = "remove";
8167 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8169 status_str = "responding";
8171 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8172 status_str = "remove delay";
8175 status_str = "unknown status";
8178 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8179 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8180 "start_port(%02d), count(%d)\n",
8181 le16_to_cpu(event_data->SwitchDevHandle),
8182 le16_to_cpu(event_data->EnclosureHandle),
8183 event_data->StartPortNum, event_data->NumEntries);
/* One log line per port entry carried in this event. */
8184 for (i = 0; i < event_data->NumEntries; i++) {
8186 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8189 port_number = event_data->StartPortNum + i;
8190 reason_code = event_data->PortEntry[i].PortStatus;
8191 switch (reason_code) {
8192 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8193 status_str = "target add";
8195 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8196 status_str = "target remove";
8198 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8199 status_str = "delay target remove";
8201 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8202 status_str = "link rate change";
8204 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8205 status_str = "target responding";
8208 status_str = "unknown";
/* Extract current/previous link rate nibbles for the log line. */
8211 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8212 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8213 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8214 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8215 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8216 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8217 handle, status_str, link_rate, prev_link_rate);
8222  * _scsih_pcie_topology_change_event - handle PCIe topology
8224  * @ioc: per adapter object
8225  * @fw_event: The fw_event_work object
 *
 * Walks each port entry of a PCIe topology change list event and acts on
 * its reason code: link-rate changes re-check the device (and may be
 * converted into a device-add), device-add entries call
 * _scsih_pcie_add_device(), not-responding entries remove the device.
 * Bails out early during shost recovery / host removal / PCI error
 * recovery, or when the event is flagged as ignorable.
 *
 * NOTE(review): sampled listing — break/continue/brace lines are not
 * visible here; code left byte-identical.
8230 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8231 struct fw_event_work *fw_event)
8236 u8 link_rate, prev_link_rate;
8237 unsigned long flags;
8239 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8240 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8241 struct _pcie_device *pcie_device;
8243 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8244 _scsih_pcie_topology_change_event_debug(ioc, event_data);
/* Nothing to do while the host is resetting or going away. */
8246 if (ioc->shost_recovery || ioc->remove_host ||
8247 ioc->pci_error_recovery)
8250 if (fw_event->ignore) {
8251 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8255 /* handle siblings events */
8256 for (i = 0; i < event_data->NumEntries; i++) {
8257 if (fw_event->ignore) {
8259 ioc_info(ioc, "ignoring switch event\n"));
8262 if (ioc->remove_host || ioc->pci_error_recovery)
8264 reason_code = event_data->PortEntry[i].PortStatus;
8266 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8270 link_rate = event_data->PortEntry[i].CurrentPortInfo
8271 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8272 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8273 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8275 switch (reason_code) {
8276 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8277 if (ioc->shost_recovery)
8279 if (link_rate == prev_link_rate)
8281 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8284 _scsih_pcie_check_device(ioc, handle);
8286 /* This code after this point handles the test case
8287 * where a device has been added, however its returning
8288 * BUSY for sometime. Then before the Device Missing
8289 * Delay expires and the device becomes READY, the
8290 * device is removed and added back.
8292 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8293 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8294 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8297 pcie_device_put(pcie_device);
8301 if (!test_bit(handle, ioc->pend_os_device_add))
/* Device unknown to the driver: rewrite the entry's reason
 * code to DEV_ADDED so the add path below handles it.
 */
8305 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8307 event_data->PortEntry[i].PortStatus &= 0xF0;
8308 event_data->PortEntry[i].PortStatus |=
8309 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8311 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8312 if (ioc->shost_recovery)
8314 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8317 rc = _scsih_pcie_add_device(ioc, handle);
8319 /* mark entry vacant */
8320 /* TODO This needs to be reviewed and fixed,
8321 * we dont have an entry
8322 * to make an event void like vacant
8324 event_data->PortEntry[i].PortStatus |=
8325 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8328 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8329 _scsih_pcie_device_remove_by_handle(ioc, handle);
8336  * _scsih_pcie_device_status_change_event_debug - debug for device event
8338  * @event_data: event data payload
 *
 * Translates a PCIe device status change reason code into a readable
 * string and logs handle, WWID and task tag; for SMART-data events the
 * ASC/ASCQ pair is appended.  Logging only; no state is modified.
 *
 * NOTE(review): sampled listing — break/brace lines are not visible
 * here; code left byte-identical.
8342 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8343 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8345 char *reason_str = NULL;
8347 switch (event_data->ReasonCode) {
8348 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8349 reason_str = "smart data";
8351 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8352 reason_str = "unsupported device discovered";
8354 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8355 reason_str = "internal device reset";
8357 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8358 reason_str = "internal task abort";
8360 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8361 reason_str = "internal task abort set";
8363 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8364 reason_str = "internal clear task set";
8366 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8367 reason_str = "internal query task";
8369 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8370 reason_str = "device init failure";
8372 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8373 reason_str = "internal device reset complete";
8375 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8376 reason_str = "internal task abort complete";
8378 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8379 reason_str = "internal async notification";
8381 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8382 reason_str = "pcie hot reset failed";
8385 reason_str = "unknown reason";
8389 ioc_info(ioc, "PCIE device status change: (%s)\n"
8390 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8391 reason_str, le16_to_cpu(event_data->DevHandle),
8392 (u64)le64_to_cpu(event_data->WWID),
8393 le16_to_cpu(event_data->TaskTag));
/* SMART-data events additionally carry sense ASC/ASCQ. */
8394 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8395 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8396 event_data->ASC, event_data->ASCQ);
8401  * _scsih_pcie_device_status_change_event - handle device status
8403  * @ioc: per adapter object
8404  * @fw_event: The fw_event_work object
 *
 * On "internal device reset" the matching target's tm_busy flag is set
 * (blocking further task management), and cleared again on the
 * corresponding "reset complete" event.  All other reason codes are
 * ignored here (debug print only).  Lookup is done by WWID under
 * pcie_device_lock.
 *
 * NOTE(review): sampled listing — goto/brace lines are not visible here;
 * code left byte-identical.
8408 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8409 struct fw_event_work *fw_event)
8411 struct MPT3SAS_TARGET *target_priv_data;
8412 struct _pcie_device *pcie_device;
8414 unsigned long flags;
8415 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8416 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8417 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8418 _scsih_pcie_device_status_change_event_debug(ioc,
/* Only the internal-device-reset start/complete pair is acted on. */
8421 if (event_data->ReasonCode !=
8422 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8423 event_data->ReasonCode !=
8424 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8427 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8428 wwid = le64_to_cpu(event_data->WWID);
8429 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8431 if (!pcie_device || !pcie_device->starget)
8434 target_priv_data = pcie_device->starget->hostdata;
8435 if (!target_priv_data)
/* tm_busy set while firmware performs the internal reset. */
8438 if (event_data->ReasonCode ==
8439 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8440 target_priv_data->tm_busy = 1;
8442 target_priv_data->tm_busy = 0;
8445 pcie_device_put(pcie_device);
8447 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8451  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8453  * @ioc: per adapter object
8454  * @event_data: event data payload
 *
 * Logs an enclosure add/remove event with handle, logical id and slot
 * count.  Logging only; no state is modified.
8458 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8459 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8461 char *reason_str = NULL;
8463 switch (event_data->ReasonCode) {
8464 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8465 reason_str = "enclosure add";
8467 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8468 reason_str = "enclosure remove";
8471 reason_str = "unknown reason";
8475 ioc_info(ioc, "enclosure status change: (%s)\n"
8476 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8478 le16_to_cpu(event_data->EnclosureHandle),
8479 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8480 le16_to_cpu(event_data->StartSlot))
8484  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8485  * @ioc: per adapter object
8486  * @fw_event: The fw_event_work object
 *
 * Keeps ioc->enclosure_list in sync with firmware enclosure events:
 * on ADDED, allocates an _enclosure_node, reads enclosure page 0 from
 * the controller and appends it to the list (freeing the node if the
 * config read fails); on NOT_RESPONDING, unlinks and frees the cached
 * node.  Skipped entirely during shost recovery.
 *
 * NOTE(review): sampled listing — break/brace lines are not visible
 * here; code left byte-identical.
8490 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8491 struct fw_event_work *fw_event)
8493 Mpi2ConfigReply_t mpi_reply;
8494 struct _enclosure_node *enclosure_dev = NULL;
8495 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8496 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8498 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8500 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8501 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8502 (Mpi2EventDataSasEnclDevStatusChange_t *)
8503 fw_event->event_data);
8504 if (ioc->shost_recovery)
/* Look up any node already cached for this enclosure handle. */
8507 if (enclosure_handle)
8509 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8511 switch (event_data->ReasonCode) {
8512 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8513 if (!enclosure_dev) {
8515 kzalloc(sizeof(struct _enclosure_node),
8517 if (!enclosure_dev) {
8518 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8519 __FILE__, __LINE__, __func__);
8522 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8523 &enclosure_dev->pg0,
8524 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
/* Config read failed: drop the freshly allocated node. */
8527 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8528 MPI2_IOCSTATUS_MASK)) {
8529 kfree(enclosure_dev);
8533 list_add_tail(&enclosure_dev->list,
8534 &ioc->enclosure_list);
8537 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8538 if (enclosure_dev) {
8539 list_del(&enclosure_dev->list);
8540 kfree(enclosure_dev);
8549  * _scsih_sas_broadcast_primitive_event - handle broadcast events
8550  * @ioc: per adapter object
8551  * @fw_event: The fw_event_work object
 *
 * Handles a SAS broadcast primitive (AEN) by walking every outstanding
 * SCSI IO (smid 1..scsiio_depth): for each eligible SAS command it sends
 * a QUERY_TASK TM; if the IOC still owns the IO it retries ABORT_TASK
 * (up to 60 times) until the command is reclaimed.  The whole scan is
 * retried up to 5 times (broadcast_aen_retry) and looped again if
 * another broadcast AEN arrived meanwhile.  All IO is blocked for the
 * duration and unblocked at exit (unless the host is in recovery).
 * Serialized under ioc->tm_cmds.mutex; the smid walk holds
 * scsi_lookup_lock, dropped around each TM issue.
 *
 * NOTE(review): sampled listing — continue/goto/brace lines are not
 * visible here; code left byte-identical.
8555 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8556 struct fw_event_work *fw_event)
8558 struct scsi_cmnd *scmd;
8559 struct scsi_device *sdev;
8560 struct scsiio_tracker *st;
8563 struct MPT3SAS_DEVICE *sas_device_priv_data;
8564 u32 termination_count;
8566 Mpi2SCSITaskManagementReply_t *mpi_reply;
8567 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8568 (Mpi2EventDataSasBroadcastPrimitive_t *)
8569 fw_event->event_data;
8571 unsigned long flags;
8574 u8 task_abort_retries;
8576 mutex_lock(&ioc->tm_cmds.mutex);
8577 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8578 __func__, event_data->PhyNum, event_data->PortWidth);
/* Quiesce all devices while TMs are being issued. */
8580 _scsih_block_io_all_device(ioc);
8582 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8583 mpi_reply = ioc->tm_cmds.reply;
8584 broadcast_aen_retry:
8586 /* sanity checks for retrying this loop */
8587 if (max_retries++ == 5) {
8588 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8590 } else if (max_retries > 1)
8592 ioc_info(ioc, "%s: %d retry\n",
8593 __func__, max_retries - 1));
8595 termination_count = 0;
/* Walk every possible outstanding SCSI IO tracker. */
8597 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8598 if (ioc->shost_recovery)
8600 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8603 st = scsi_cmd_priv(scmd);
8604 sdev = scmd->device;
8605 sas_device_priv_data = sdev->hostdata;
8606 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8608 /* skip hidden raid components */
8609 if (sas_device_priv_data->sas_target->flags &
8610 MPT_TARGET_FLAGS_RAID_COMPONENT)
8613 if (sas_device_priv_data->sas_target->flags &
8614 MPT_TARGET_FLAGS_VOLUME)
8616 /* skip PCIe devices */
8617 if (sas_device_priv_data->sas_target->flags &
8618 MPT_TARGET_FLAGS_PCIE_DEVICE)
8621 handle = sas_device_priv_data->sas_target->handle;
8622 lun = sas_device_priv_data->lun;
8625 if (ioc->shost_recovery)
/* Drop the lookup lock around the blocking TM request. */
8628 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8629 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8630 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8631 st->msix_io, 30, 0);
8633 sdev_printk(KERN_WARNING, sdev,
8634 "mpt3sas_scsih_issue_tm: FAILED when sending "
8635 "QUERY_TASK: scmd(%p)\n", scmd);
8636 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8637 goto broadcast_aen_retry;
8639 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8640 & MPI2_IOCSTATUS_MASK;
8641 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8642 sdev_printk(KERN_WARNING, sdev,
8643 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8645 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8646 goto broadcast_aen_retry;
8649 /* see if IO is still owned by IOC and target */
8650 if (mpi_reply->ResponseCode ==
8651 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8652 mpi_reply->ResponseCode ==
8653 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8654 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8657 task_abort_retries = 0;
/* Retry ABORT_TASK until the IO is reclaimed, max 60 tries. */
8659 if (task_abort_retries++ == 60) {
8661 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8663 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8664 goto broadcast_aen_retry;
8667 if (ioc->shost_recovery)
8670 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8671 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8672 st->smid, st->msix_io, 30, 0);
/* cb_idx still set means the command was not reclaimed yet. */
8673 if (r == FAILED || st->cb_idx != 0xFF) {
8674 sdev_printk(KERN_WARNING, sdev,
8675 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8676 "scmd(%p)\n", scmd);
8680 if (task_abort_retries > 1)
8681 sdev_printk(KERN_WARNING, sdev,
8682 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8684 task_abort_retries - 1, scmd);
8686 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8687 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
/* Another broadcast AEN arrived during the scan: rescan. */
8690 if (ioc->broadcast_aen_pending) {
8693 "%s: loop back due to pending AEN\n",
8695 ioc->broadcast_aen_pending = 0;
8696 goto broadcast_aen_retry;
8700 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8704 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8705 __func__, query_count, termination_count));
8707 ioc->broadcast_aen_busy = 0;
8708 if (!ioc->shost_recovery)
8709 _scsih_ublock_io_all_device(ioc);
8710 mutex_unlock(&ioc->tm_cmds.mutex);
8714  * _scsih_sas_discovery_event - handle discovery events
8715  * @ioc: per adapter object
8716  * @fw_event: The fw_event_work object
 *
 * Optionally logs the discovery started/completed event; on the first
 * "discovery started" (no phys known yet) it registers the SAS host via
 * _scsih_sas_host_add(), waiting out any in-flight reset first when
 * discovery is administratively disabled.
8720 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8721 struct fw_event_work *fw_event)
8723 Mpi2EventDataSasDiscovery_t *event_data =
8724 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8726 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8727 ioc_info(ioc, "discovery event: (%s)",
8728 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8730 if (event_data->DiscoveryStatus)
8731 pr_cont("discovery_status(0x%08x)",
8732 le32_to_cpu(event_data->DiscoveryStatus));
/* First discovery-start with no phys yet: bring up the SAS host. */
8736 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8737 !ioc->sas_hba.num_phys) {
8738 if (disable_discovery > 0 && ioc->shost_recovery) {
8739 /* Wait for the reset to complete */
8740 while (ioc->shost_recovery)
8743 _scsih_sas_host_add(ioc);
8748  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8750  * @ioc: per adapter object
8751  * @fw_event: The fw_event_work object
 *
 * Logs a warning when an SMP command to an expander failed or timed out
 * during discovery.  Reporting only; no recovery action is taken here.
8755 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8756 struct fw_event_work *fw_event)
8758 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8759 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8761 switch (event_data->ReasonCode) {
8762 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8763 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8764 le16_to_cpu(event_data->DevHandle),
8765 (u64)le64_to_cpu(event_data->SASAddress),
8766 event_data->PhysicalPort);
8768 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8769 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8770 le16_to_cpu(event_data->DevHandle),
8771 (u64)le64_to_cpu(event_data->SASAddress),
8772 event_data->PhysicalPort);
8780  * _scsih_pcie_enumeration_event - handle enumeration events
8781  * @ioc: per adapter object
8782  * @fw_event: The fw_event_work object
 *
 * Purely informational: logs PCIe enumeration started/completed (plus
 * the enumeration status when non-zero) if event-work debug logging is
 * enabled; otherwise returns immediately.
8786 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8787 struct fw_event_work *fw_event)
8789 Mpi26EventDataPCIeEnumeration_t *event_data =
8790 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8792 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8795 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8796 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8797 "started" : "completed",
8799 if (event_data->EnumerationStatus)
8800 pr_cont("enumeration_status(0x%08x)",
8801 le32_to_cpu(event_data->EnumerationStatus));
8806  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8807  * @ioc: per adapter object
8808  * @handle: device handle for physical disk
8809  * @phys_disk_num: physical disk number
8811  * Return: 0 for success, else failure.
 *
 * Sends a RAID_ACTION (PHYSDISK_HIDDEN) request to the firmware for the
 * given physical disk and waits up to 10s for completion.  No-op on
 * MPI2-generation HBAs.  Serialized via ioc->scsih_cmds; on timeout a
 * hard reset (FORCE_BIG_HAMMER) may be issued on the error path.
 *
 * NOTE(review): sampled listing — goto/brace/return lines are not
 * visible here; code left byte-identical.
8814 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8816 Mpi2RaidActionRequest_t *mpi_request;
8817 Mpi2RaidActionReply_t *mpi_reply;
/* RAID_ACTION fastpath is not applicable to MPI2-generation HBAs. */
8824 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8827 mutex_lock(&ioc->scsih_cmds.mutex);
8829 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8830 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8834 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8836 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8838 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8839 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8844 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8845 ioc->scsih_cmds.smid = smid;
8846 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8848 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8849 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8850 mpi_request->PhysDiskNum = phys_disk_num;
8853 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8854 handle, phys_disk_num));
8856 init_completion(&ioc->scsih_cmds.done);
8857 ioc->put_smid_default(ioc, smid);
8858 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
/* Timeout path: decide whether a controller reset is required. */
8860 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8861 mpt3sas_check_cmd_timeout(ioc,
8862 ioc->scsih_cmds.status, mpi_request,
8863 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8868 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8870 mpi_reply = ioc->scsih_cmds.reply;
8871 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8872 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8873 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
8876 ioc_status &= MPI2_IOCSTATUS_MASK;
8877 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8879 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8880 ioc_status, log_info));
8884 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
8888 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8889 mutex_unlock(&ioc->scsih_cmds.mutex);
8892 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8897  * _scsih_reprobe_lun - reprobing lun
8898  * @sdev: scsi device struct
8899  * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 * starget_for_each_device() callback: toggles whether upper-level
 * drivers (sd etc.) may bind to the LUN, then asks the midlayer to
 * reprobe it so the change takes effect.
8903 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8905 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8906 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8907 sdev->no_uld_attach ? "hiding" : "exposing");
8908 WARN_ON(scsi_device_reprobe(sdev));
8912  * _scsih_sas_volume_add - add new volume
8913  * @ioc: per adapter object
8914  * @element: IR config element data
 *
 * Reads the volume WWID, skips volumes already known by WWID, then
 * allocates and registers a _raid_device on RAID_CHANNEL.  Outside of
 * initial discovery the volume is exposed to the midlayer immediately
 * via scsi_add_device() (and unwound on failure); during discovery it
 * is recorded for boot-device selection instead.
 *
 * NOTE(review): sampled listing — return/brace lines are not visible
 * here; code left byte-identical.
8918 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8919 Mpi2EventIrConfigElement_t *element)
8921 struct _raid_device *raid_device;
8922 unsigned long flags;
8924 u16 handle = le16_to_cpu(element->VolDevHandle);
8927 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8929 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8930 __FILE__, __LINE__, __func__);
/* Already tracked by WWID: nothing to add. */
8934 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8935 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8936 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8941 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8943 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8944 __FILE__, __LINE__, __func__);
8948 raid_device->id = ioc->sas_id++;
8949 raid_device->channel = RAID_CHANNEL;
8950 raid_device->handle = handle;
8951 raid_device->wwid = wwid;
8952 _scsih_raid_device_add(ioc, raid_device);
8953 if (!ioc->wait_for_discovery_to_complete) {
8954 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8955 raid_device->id, 0);
8957 _scsih_raid_device_remove(ioc, raid_device);
8959 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8960 _scsih_determine_boot_device(ioc, raid_device, 1);
8961 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8966  * _scsih_sas_volume_delete - delete volume
8967  * @ioc: per adapter object
8968  * @handle: volume device handle
 *
 * Looks up the raid volume by handle, marks its target deleted, unlinks
 * it from the driver's list under raid_device_lock, then (outside the
 * lock) removes the scsi_target from the midlayer.
8972 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8974 struct _raid_device *raid_device;
8975 unsigned long flags;
8976 struct MPT3SAS_TARGET *sas_target_priv_data;
8977 struct scsi_target *starget = NULL;
8979 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8980 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8982 if (raid_device->starget) {
8983 starget = raid_device->starget;
8984 sas_target_priv_data = starget->hostdata;
/* Flag deletion so in-flight IO paths stop using this target. */
8985 sas_target_priv_data->deleted = 1;
8987 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
8988 raid_device->handle, (u64)raid_device->wwid);
8989 list_del(&raid_device->list);
8992 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* Midlayer removal must happen outside the spinlock (may sleep). */
8994 scsi_remove_target(&starget->dev);
8998  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
8999  * @ioc: per adapter object
9000  * @element: IR config element data
 *
 * A physical disk left a RAID volume: clear its pd_handles bit and the
 * RAID_COMPONENT flag, then reprobe each LUN with no_uld_attach=0 so
 * upper-level drivers attach and the disk appears as /dev/sdX again.
9004 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9005 Mpi2EventIrConfigElement_t *element)
9007 struct _sas_device *sas_device;
9008 struct scsi_target *starget = NULL;
9009 struct MPT3SAS_TARGET *sas_target_priv_data;
9010 unsigned long flags;
9011 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9013 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9014 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9016 sas_device->volume_handle = 0;
9017 sas_device->volume_wwid = 0;
9018 clear_bit(handle, ioc->pd_handles);
9019 if (sas_device->starget && sas_device->starget->hostdata) {
9020 starget = sas_device->starget;
9021 sas_target_priv_data = starget->hostdata;
9022 sas_target_priv_data->flags &=
9023 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9026 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9030 /* exposing raid component */
/* NULL cookie -> no_uld_attach = 0 in _scsih_reprobe_lun(). */
9032 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9034 sas_device_put(sas_device);
9038  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9039  * @ioc: per adapter object
9040  * @element: IR config element data
 *
 * A physical disk joined a RAID volume: record its parent volume
 * handle/WWID, set the pd_handles bit and RAID_COMPONENT flag, enable
 * firmware fastpath for the disk, then reprobe each LUN with
 * no_uld_attach=1 so the disk disappears from /dev/sdX.
9044 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9045 Mpi2EventIrConfigElement_t *element)
9047 struct _sas_device *sas_device;
9048 struct scsi_target *starget = NULL;
9049 struct MPT3SAS_TARGET *sas_target_priv_data;
9050 unsigned long flags;
9051 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9052 u16 volume_handle = 0;
9053 u64 volume_wwid = 0;
9055 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9057 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9060 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9061 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9063 set_bit(handle, ioc->pd_handles);
9064 if (sas_device->starget && sas_device->starget->hostdata) {
9065 starget = sas_device->starget;
9066 sas_target_priv_data = starget->hostdata;
9067 sas_target_priv_data->flags |=
9068 MPT_TARGET_FLAGS_RAID_COMPONENT;
9069 sas_device->volume_handle = volume_handle;
9070 sas_device->volume_wwid = volume_wwid;
9073 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9077 /* hiding raid component */
9078 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
/* (void *)1 cookie -> no_uld_attach = 1 in _scsih_reprobe_lun(). */
9081 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9083 sas_device_put(sas_device);
9087  * _scsih_sas_pd_delete - delete pd component
9088  * @ioc: per adapter object
9089  * @element: IR config element data
 *
 * Thin wrapper: removes the physical-disk device identified by the
 * element's PhysDiskDevHandle via _scsih_device_remove_by_handle().
9093 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9094 Mpi2EventIrConfigElement_t *element)
9096 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9098 _scsih_device_remove_by_handle(ioc, handle);
9102  * _scsih_sas_pd_add - remove pd component
9103  * @ioc: per adapter object
9104  * @element: IR config element data
 *
 * Marks the physical disk hidden (pd_handles bit).  If the device is
 * already known, only fastpath is (re)enabled; otherwise SAS device
 * page 0 is read to resolve the parent/phy, transport links are
 * updated, fastpath is enabled and the device is added as a hidden
 * RAID component (_scsih_add_device(..., is_pd=1)).
 * NOTE(review): the one-line summary above says "remove" in the
 * original kernel-doc, but the code adds — presumably a stale comment;
 * kept byte-identical here.
9108 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9109 Mpi2EventIrConfigElement_t *element)
9111 struct _sas_device *sas_device;
9112 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9113 Mpi2ConfigReply_t mpi_reply;
9114 Mpi2SasDevicePage0_t sas_device_pg0;
9119 set_bit(handle, ioc->pd_handles);
9121 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
/* Known device: just turn fastpath on and drop the reference. */
9123 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9124 sas_device_put(sas_device);
9128 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9129 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9130 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9131 __FILE__, __LINE__, __func__);
9135 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9136 MPI2_IOCSTATUS_MASK;
9137 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9138 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9139 __FILE__, __LINE__, __func__);
9143 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9144 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9145 mpt3sas_transport_update_links(ioc, sas_address, handle,
9146 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9147 mpt3sas_get_port_by_id(ioc,
9148 sas_device_pg0.PhysicalPort, 0));
9150 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9151 _scsih_add_device(ioc, handle, 0, 1);
9155  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9156  * @ioc: per adapter object
9157  * @event_data: event data payload
 *
 * Logs the foreign/native flag and then one line per config element,
 * decoding both its reason code and its element type (volume, phys
 * disk, hot spare).  Logging only; no state is modified.
9161 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9162 Mpi2EventDataIrConfigChangeList_t *event_data)
9164 Mpi2EventIrConfigElement_t *element;
9167 char *reason_str = NULL, *element_str = NULL;
9169 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9171 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9172 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9173 "foreign" : "native",
9174 event_data->NumElements);
9175 for (i = 0; i < event_data->NumElements; i++, element++) {
9176 switch (element->ReasonCode) {
9177 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9180 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9181 reason_str = "remove";
9183 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9184 reason_str = "no change";
9186 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9187 reason_str = "hide";
9189 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9190 reason_str = "unhide";
9192 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9193 reason_str = "volume_created";
9195 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9196 reason_str = "volume_deleted";
9198 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9199 reason_str = "pd_created";
9201 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9202 reason_str = "pd_deleted";
9205 reason_str = "unknown reason";
/* Decode the element type from the element's flags field. */
9208 element_type = le16_to_cpu(element->ElementFlags) &
9209 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9210 switch (element_type) {
9211 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9212 element_str = "volume";
9214 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9215 element_str = "phys disk";
9217 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9218 element_str = "hot spare";
9221 element_str = "unknown element";
9224 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9225 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9226 reason_str, le16_to_cpu(element->VolDevHandle),
9227 le16_to_cpu(element->PhysDiskDevHandle),
9228 element->PhysDiskNum);
9233  * _scsih_sas_ir_config_change_event - handle ir configuration change events
9234  * @ioc: per adapter object
9235  * @fw_event: The fw_event_work object
 *
 * Dispatches each IR config element to the matching handler: volume
 * create/delete (skipped for foreign configs), and pd hide/expose/
 * add/delete (skipped on warpdrive, which manages pds differently).
 * During shost recovery on MPI2.5+ HBAs only the fastpath re-enable
 * for HIDE elements is performed.
 *
 * NOTE(review): sampled listing — break/return lines are not visible
 * here; code left byte-identical.
9239 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9240 struct fw_event_work *fw_event)
9242 Mpi2EventIrConfigElement_t *element;
9245 Mpi2EventDataIrConfigChangeList_t *event_data =
9246 (Mpi2EventDataIrConfigChangeList_t *)
9247 fw_event->event_data;
9249 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9250 (!ioc->hide_ir_msg))
9251 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9253 foreign_config = (le32_to_cpu(event_data->Flags) &
9254 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9256 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
/* During recovery, only re-arm fastpath for hidden pds. */
9257 if (ioc->shost_recovery &&
9258 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9259 for (i = 0; i < event_data->NumElements; i++, element++) {
9260 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9261 _scsih_ir_fastpath(ioc,
9262 le16_to_cpu(element->PhysDiskDevHandle),
9263 element->PhysDiskNum);
9268 for (i = 0; i < event_data->NumElements; i++, element++) {
9270 switch (element->ReasonCode) {
9271 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9272 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9273 if (!foreign_config)
9274 _scsih_sas_volume_add(ioc, element);
9276 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9277 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9278 if (!foreign_config)
9279 _scsih_sas_volume_delete(ioc,
9280 le16_to_cpu(element->VolDevHandle));
9282 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9283 if (!ioc->is_warpdrive)
9284 _scsih_sas_pd_hide(ioc, element);
9286 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9287 if (!ioc->is_warpdrive)
9288 _scsih_sas_pd_expose(ioc, element);
9290 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9291 if (!ioc->is_warpdrive)
9292 _scsih_sas_pd_add(ioc, element);
9294 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9295 if (!ioc->is_warpdrive)
9296 _scsih_sas_pd_delete(ioc, element);
9303  * _scsih_sas_ir_volume_event - IR volume event
9304  * @ioc: per adapter object
9305  * @fw_event: The fw_event_work object
 *
 * Handles IR volume state changes: MISSING/FAILED deletes the volume;
 * ONLINE/DEGRADED/OPTIMAL creates and registers a _raid_device (and
 * exposes it via scsi_add_device, unwinding on failure) if it is not
 * already known.  Other states (e.g. INITIALIZING) are ignored, as are
 * non-STATE_CHANGED reason codes and events during shost recovery.
 *
 * NOTE(review): sampled listing — break/return lines are not visible
 * here; code left byte-identical.
9309 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9310 struct fw_event_work *fw_event)
9313 unsigned long flags;
9314 struct _raid_device *raid_device;
9318 Mpi2EventDataIrVolume_t *event_data =
9319 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
9321 if (ioc->shost_recovery)
9324 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9327 handle = le16_to_cpu(event_data->VolDevHandle);
9328 state = le32_to_cpu(event_data->NewValue);
9329 if (!ioc->hide_ir_msg)
9331 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9333 le32_to_cpu(event_data->PreviousValue),
9336 case MPI2_RAID_VOL_STATE_MISSING:
9337 case MPI2_RAID_VOL_STATE_FAILED:
9338 _scsih_sas_volume_delete(ioc, handle);
9341 case MPI2_RAID_VOL_STATE_ONLINE:
9342 case MPI2_RAID_VOL_STATE_DEGRADED:
9343 case MPI2_RAID_VOL_STATE_OPTIMAL:
/* Volume already tracked: nothing more to do. */
9345 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9346 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9347 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9352 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9354 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9355 __FILE__, __LINE__, __func__);
9359 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9361 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9362 __FILE__, __LINE__, __func__);
9366 raid_device->id = ioc->sas_id++;
9367 raid_device->channel = RAID_CHANNEL;
9368 raid_device->handle = handle;
9369 raid_device->wwid = wwid;
9370 _scsih_raid_device_add(ioc, raid_device);
9371 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9372 raid_device->id, 0);
9374 _scsih_raid_device_remove(ioc, raid_device);
9377 case MPI2_RAID_VOL_STATE_INITIALIZING:
9384 * _scsih_sas_ir_physical_disk_event - PD event
9385 * @ioc: per adapter object
9386 * @fw_event: The fw_event_work object
/*
 * Handles MPI2_EVENT_IR_PHYSICAL_DISK state changes. When a PD goes
 * ONLINE/DEGRADED/REBUILDING/OPTIMAL/HOT_SPARE it is tracked in
 * ioc->pd_handles, its parent links are refreshed, and the device is added
 * if not already known.
 * NOTE(review): sparse extract — embedded original line numbers are
 * non-contiguous; intervening statements are missing from this view.
 */
9390 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9391 struct fw_event_work *fw_event)
9393 u16 handle, parent_handle;
9395 struct _sas_device *sas_device;
9396 Mpi2ConfigReply_t mpi_reply;
9397 Mpi2SasDevicePage0_t sas_device_pg0;
9399 Mpi2EventDataIrPhysicalDisk_t *event_data =
9400 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
/* skip event processing during host reset recovery */
9403 if (ioc->shost_recovery)
9406 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9409 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9410 state = le32_to_cpu(event_data->NewValue);
9412 if (!ioc->hide_ir_msg)
9414 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9416 le32_to_cpu(event_data->PreviousValue),
9420 case MPI2_RAID_PD_STATE_ONLINE:
9421 case MPI2_RAID_PD_STATE_DEGRADED:
9422 case MPI2_RAID_PD_STATE_REBUILDING:
9423 case MPI2_RAID_PD_STATE_OPTIMAL:
9424 case MPI2_RAID_PD_STATE_HOT_SPARE:
/* warpdrive controllers manage PD visibility differently, so the
 * pd_handles bitmap is only updated for non-warpdrive HBAs */
9426 if (!ioc->is_warpdrive)
9427 set_bit(handle, ioc->pd_handles);
/* already known — drop the lookup reference and do nothing more */
9429 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9431 sas_device_put(sas_device);
9435 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9436 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9438 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9439 __FILE__, __LINE__, __func__);
9443 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9444 MPI2_IOCSTATUS_MASK;
9445 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9446 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9447 __FILE__, __LINE__, __func__);
/* refresh the transport-layer link to the parent before adding */
9451 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9452 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9453 mpt3sas_transport_update_links(ioc, sas_address, handle,
9454 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9455 mpt3sas_get_port_by_id(ioc,
9456 sas_device_pg0.PhysicalPort, 0));
9458 _scsih_add_device(ioc, handle, 0, 1);
9462 case MPI2_RAID_PD_STATE_OFFLINE:
9463 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9464 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
9471 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9472 * @ioc: per adapter object
9473 * @event_data: event data payload
/*
 * Maps the RAIDOperation code to a human-readable string and logs the
 * operation, volume handle and percent complete. Pure logging helper —
 * no state is modified.
 * NOTE(review): sparse extract — break statements between cases and the
 * default/NULL-reason handling are missing from this view.
 */
9477 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9478 Mpi2EventDataIrOperationStatus_t *event_data)
9480 char *reason_str = NULL;
9482 switch (event_data->RAIDOperation) {
9483 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9484 reason_str = "resync";
9486 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9487 reason_str = "online capacity expansion";
9489 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9490 reason_str = "consistency check";
9492 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9493 reason_str = "background init";
9495 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9496 reason_str = "make data consistent";
9503 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9505 le16_to_cpu(event_data->VolDevHandle),
9506 event_data->PercentComplete);
9510 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9511 * @ioc: per adapter object
9512 * @fw_event: The fw_event_work object
/*
 * Logs the IR operation status (when event-work-task debug logging is on
 * and IR messages are not hidden) and, for a RESYNC operation, caches the
 * percent-complete on the matching raid_device for raid transport sysfs.
 * NOTE(review): `raid_device` is declared as a `static` function-local
 * pointer; it is assigned under raid_device_lock before use here, but a
 * static local is unusual and worth confirming against upstream intent.
 * NOTE(review): sparse extract — intervening lines are missing from view.
 */
9516 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9517 struct fw_event_work *fw_event)
9519 Mpi2EventDataIrOperationStatus_t *event_data =
9520 (Mpi2EventDataIrOperationStatus_t *)
9521 fw_event->event_data;
9522 static struct _raid_device *raid_device;
9523 unsigned long flags;
9526 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9527 (!ioc->hide_ir_msg))
9528 _scsih_sas_ir_operation_status_event_debug(ioc,
9531 /* code added for raid transport support */
9532 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9534 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9535 handle = le16_to_cpu(event_data->VolDevHandle);
9536 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9538 raid_device->percent_complete =
9539 event_data->PercentComplete;
9540 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9545 * _scsih_prep_device_scan - initialize parameters prior to device scan
9546 * @ioc: per adapter object
9548 * Set the deleted flag prior to device scan. If the device is found during
9549 * the scan, then we clear the deleted flag.
/*
 * Walks every scsi_device on the host and marks its target as deleted;
 * the subsequent rescan clears the flag on devices that respond.
 * NOTE(review): sparse extract — closing braces are missing from view.
 */
9552 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9554 struct MPT3SAS_DEVICE *sas_device_priv_data;
9555 struct scsi_device *sdev;
9557 shost_for_each_device(sdev, ioc->shost) {
/* hostdata may be NULL for devices not (yet) bound to this driver */
9558 sas_device_priv_data = sdev->hostdata;
9559 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9560 sas_device_priv_data->sas_target->deleted = 1;
9565 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
9566 * @ioc: per adapter object
9567 * @sas_device_pg0: SAS Device page 0
9569 * After host reset, find out whether devices are still responding.
9570 * Used in _scsih_remove_unresponsive_sas_devices.
/*
 * Matches the given SAS Device Page 0 against the cached sas_device_list
 * (by SAS address, slot and hba_port), marks the match responding, clears
 * its target's deleted/tm_busy flags, refreshes enclosure fields, and
 * updates the cached handle if firmware assigned a new one after reset.
 * NOTE(review): sparse extract — intervening lines (continue/break and
 * closing braces) are missing from this view.
 */
9573 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9574 Mpi2SasDevicePage0_t *sas_device_pg0)
9576 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9577 struct scsi_target *starget;
9578 struct _sas_device *sas_device = NULL;
9579 struct _enclosure_node *enclosure_dev = NULL;
9580 unsigned long flags;
9581 struct hba_port *port = mpt3sas_get_port_by_id(
9582 ioc, sas_device_pg0->PhysicalPort, 0);
/* resolve the enclosure node first (outside the device-list lock) */
9584 if (sas_device_pg0->EnclosureHandle) {
9586 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9587 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9588 if (enclosure_dev == NULL)
9589 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9590 sas_device_pg0->EnclosureHandle);
9592 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9593 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
/* a device matches only on SAS address + slot + port */
9594 if (sas_device->sas_address != le64_to_cpu(
9595 sas_device_pg0->SASAddress))
9597 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9599 if (sas_device->port != port)
9601 sas_device->responding = 1;
9602 starget = sas_device->starget;
9603 if (starget && starget->hostdata) {
9604 sas_target_priv_data = starget->hostdata;
9605 sas_target_priv_data->tm_busy = 0;
9606 sas_target_priv_data->deleted = 0;
9608 sas_target_priv_data = NULL;
9610 starget_printk(KERN_INFO, starget,
9611 "handle(0x%04x), sas_addr(0x%016llx)\n",
9612 le16_to_cpu(sas_device_pg0->DevHandle),
9613 (unsigned long long)
9614 sas_device->sas_address);
9616 if (sas_device->enclosure_handle != 0)
9617 starget_printk(KERN_INFO, starget,
9618 "enclosure logical id(0x%016llx), slot(%d)\n",
9619 (unsigned long long)
9620 sas_device->enclosure_logical_id,
/* enclosure level/connector are only valid when the flag says so */
9623 if (le16_to_cpu(sas_device_pg0->Flags) &
9624 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9625 sas_device->enclosure_level =
9626 sas_device_pg0->EnclosureLevel;
9627 memcpy(&sas_device->connector_name[0],
9628 &sas_device_pg0->ConnectorName[0], 4);
9630 sas_device->enclosure_level = 0;
9631 sas_device->connector_name[0] = '\0';
9634 sas_device->enclosure_handle =
9635 le16_to_cpu(sas_device_pg0->EnclosureHandle);
9636 sas_device->is_chassis_slot_valid = 0;
9637 if (enclosure_dev) {
9638 sas_device->enclosure_logical_id = le64_to_cpu(
9639 enclosure_dev->pg0.EnclosureLogicalID);
9640 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9641 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9642 sas_device->is_chassis_slot_valid = 1;
9643 sas_device->chassis_slot =
9644 enclosure_dev->pg0.ChassisSlot;
/* handle may change across host reset — resync cached copies */
9648 if (sas_device->handle == le16_to_cpu(
9649 sas_device_pg0->DevHandle))
9651 pr_info("\thandle changed from(0x%04x)!!!\n",
9652 sas_device->handle);
9653 sas_device->handle = le16_to_cpu(
9654 sas_device_pg0->DevHandle);
9655 if (sas_target_priv_data)
9656 sas_target_priv_data->handle =
9657 le16_to_cpu(sas_device_pg0->DevHandle);
9661 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9665 * _scsih_create_enclosure_list_after_reset - Free Existing list,
9666 * And create enclosure list by scanning all Enclosure Page(0)s
9667 * @ioc: per adapter object
/*
 * Frees the stale enclosure list and rebuilds it by iterating Enclosure
 * Page 0 with GET_NEXT_HANDLE (starting from 0xFFFF) until the config
 * request fails, adding one _enclosure_node per page to
 * ioc->enclosure_list. On a failed page read the just-allocated node is
 * freed and the loop presumably terminates.
 * NOTE(review): sparse extract — loop construct, allocation assignment
 * and break/return lines are missing from this view.
 */
9670 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9672 struct _enclosure_node *enclosure_dev;
9673 Mpi2ConfigReply_t mpi_reply;
9674 u16 enclosure_handle;
9677 /* Free existing enclosure list */
9678 mpt3sas_free_enclosure_list(ioc);
9680 /* Re constructing enclosure list after reset*/
9681 enclosure_handle = 0xFFFF;
9684 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9685 if (!enclosure_dev) {
9686 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9687 __FILE__, __LINE__, __func__);
9690 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9691 &enclosure_dev->pg0,
9692 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
/* on error or non-success IOCStatus, discard this node */
9695 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9696 MPI2_IOCSTATUS_MASK)) {
9697 kfree(enclosure_dev);
9700 list_add_tail(&enclosure_dev->list,
9701 &ioc->enclosure_list);
9703 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9708 * _scsih_search_responding_sas_devices -
9709 * @ioc: per adapter object
9711 * After host reset, find out whether devices are still responding.
/*
 * Iterates SAS Device Page 0 with GET_NEXT_HANDLE; every end device found
 * is marked responding via _scsih_mark_responding_sas_device(). Skips the
 * scan entirely when the cached device list is empty.
 * NOTE(review): sparse extract — declarations, handle updates and closing
 * braces are missing from this view.
 */
9715 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9717 Mpi2SasDevicePage0_t sas_device_pg0;
9718 Mpi2ConfigReply_t mpi_reply;
9723 ioc_info(ioc, "search for end-devices: start\n");
9725 if (list_empty(&ioc->sas_device_list))
9729 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9730 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9732 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9733 MPI2_IOCSTATUS_MASK;
9734 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9736 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9737 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
/* only end devices are of interest here; expanders are handled
 * by _scsih_search_responding_expanders() */
9738 if (!(_scsih_is_end_device(device_info)))
9740 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9744 ioc_info(ioc, "search for end-devices: complete\n");
9748 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9749 * @ioc: per adapter object
9750 * @pcie_device_pg0: PCIe Device page 0
9752 * After host reset, find out whether devices are still responding.
9753 * Used in _scsih_remove_unresponding_devices.
/*
 * PCIe counterpart of _scsih_mark_responding_sas_device(): matches by
 * WWID + slot, marks the device responding, clears target flags, refreshes
 * enclosure level/connector name, and resyncs the cached handle if it
 * changed across the reset.
 * NOTE(review): sparse extract — intervening lines are missing from view.
 */
9756 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9757 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9759 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9760 struct scsi_target *starget;
9761 struct _pcie_device *pcie_device;
9762 unsigned long flags;
9764 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9765 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
/* match on WWID and slot */
9766 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9767 && (pcie_device->slot == le16_to_cpu(
9768 pcie_device_pg0->Slot))) {
9769 pcie_device->access_status =
9770 pcie_device_pg0->AccessStatus;
9771 pcie_device->responding = 1;
9772 starget = pcie_device->starget;
9773 if (starget && starget->hostdata) {
9774 sas_target_priv_data = starget->hostdata;
9775 sas_target_priv_data->tm_busy = 0;
9776 sas_target_priv_data->deleted = 0;
9778 sas_target_priv_data = NULL;
9780 starget_printk(KERN_INFO, starget,
9781 "handle(0x%04x), wwid(0x%016llx) ",
9782 pcie_device->handle,
9783 (unsigned long long)pcie_device->wwid);
9784 if (pcie_device->enclosure_handle != 0)
9785 starget_printk(KERN_INFO, starget,
9786 "enclosure logical id(0x%016llx), "
9788 (unsigned long long)
9789 pcie_device->enclosure_logical_id,
/* enclosure level is not reported by MPI2-generation HBAs */
9793 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9794 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9795 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9796 pcie_device->enclosure_level =
9797 pcie_device_pg0->EnclosureLevel;
9798 memcpy(&pcie_device->connector_name[0],
9799 &pcie_device_pg0->ConnectorName[0], 4);
9801 pcie_device->enclosure_level = 0;
9802 pcie_device->connector_name[0] = '\0';
/* handle may change across host reset — resync cached copies */
9805 if (pcie_device->handle == le16_to_cpu(
9806 pcie_device_pg0->DevHandle))
9808 pr_info("\thandle changed from(0x%04x)!!!\n",
9809 pcie_device->handle);
9810 pcie_device->handle = le16_to_cpu(
9811 pcie_device_pg0->DevHandle);
9812 if (sas_target_priv_data)
9813 sas_target_priv_data->handle =
9814 le16_to_cpu(pcie_device_pg0->DevHandle);
9820 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9824 * _scsih_search_responding_pcie_devices -
9825 * @ioc: per adapter object
9827 * After host reset, find out whether devices are still responding.
/*
 * Iterates PCIe Device Page 0 with GET_NEXT_HANDLE; every NVMe/PCIe-SCSI
 * device found is marked responding via
 * _scsih_mark_responding_pcie_device(). No-op when the cached list is
 * empty.
 * NOTE(review): sparse extract — declarations and loop-closing lines are
 * missing from this view.
 */
9831 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9833 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9834 Mpi2ConfigReply_t mpi_reply;
9839 ioc_info(ioc, "search for end-devices: start\n");
9841 if (list_empty(&ioc->pcie_device_list))
9845 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9846 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9848 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9849 MPI2_IOCSTATUS_MASK;
9850 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9851 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9852 __func__, ioc_status,
9853 le32_to_cpu(mpi_reply.IOCLogInfo));
9856 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9857 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9858 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9860 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9863 ioc_info(ioc, "search for PCIe end-devices: complete\n");
9867 * _scsih_mark_responding_raid_device - mark a raid_device as responding
9868 * @ioc: per adapter object
9869 * @wwid: world wide identifier for raid volume
9870 * @handle: device handle
9872 * After host reset, find out whether devices are still responding.
9873 * Used in _scsih_remove_unresponsive_raid_devices.
/*
 * Finds the raid_device by WWID, marks it responding, clears its target's
 * deleted flag, re-initializes warpdrive direct-IO properties, and updates
 * the cached handle if it changed across the reset. The lock is dropped
 * around the printk/warpdrive work and re-taken for the handle update.
 * NOTE(review): sparse extract — return statements and closing braces are
 * missing from this view.
 */
9876 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
9879 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9880 struct scsi_target *starget;
9881 struct _raid_device *raid_device;
9882 unsigned long flags;
9884 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9885 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
9886 if (raid_device->wwid == wwid && raid_device->starget) {
9887 starget = raid_device->starget;
9888 if (starget && starget->hostdata) {
9889 sas_target_priv_data = starget->hostdata;
9890 sas_target_priv_data->deleted = 0;
9892 sas_target_priv_data = NULL;
9893 raid_device->responding = 1;
/* unlock before logging and warpdrive re-init (may sleep/IO) */
9894 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9895 starget_printk(KERN_INFO, raid_device->starget,
9896 "handle(0x%04x), wwid(0x%016llx)\n", handle,
9897 (unsigned long long)raid_device->wwid);
9900 * WARPDRIVE: The handles of the PDs might have changed
9901 * across the host reset so re-initialize the
9902 * required data for Direct IO
9904 mpt3sas_init_warpdrive_properties(ioc, raid_device);
9905 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9906 if (raid_device->handle == handle) {
9907 spin_unlock_irqrestore(&ioc->raid_device_lock,
9911 pr_info("\thandle changed from(0x%04x)!!!\n",
9912 raid_device->handle);
9913 raid_device->handle = handle;
9914 if (sas_target_priv_data)
9915 sas_target_priv_data->handle = handle;
9916 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9920 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9924 * _scsih_search_responding_raid_devices -
9925 * @ioc: per adapter object
9927 * After host reset, find out whether devices are still responding.
/*
 * For IR firmware only: walks Raid Volume Page 1 with GET_NEXT_HANDLE and
 * marks healthy (OPTIMAL/ONLINE/DEGRADED) volumes as responding, then (on
 * non-warpdrive HBAs) rebuilds the ioc->pd_handles bitmap from Phys Disk
 * Page 0.
 * NOTE(review): sparse extract — declarations, continue/break and closing
 * braces are missing from this view.
 */
9931 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9933 Mpi2RaidVolPage1_t volume_pg1;
9934 Mpi2RaidVolPage0_t volume_pg0;
9935 Mpi2RaidPhysDiskPage0_t pd_pg0;
9936 Mpi2ConfigReply_t mpi_reply;
/* nothing to do when the HBA has no IR (integrated RAID) firmware */
9941 if (!ioc->ir_firmware)
9944 ioc_info(ioc, "search for raid volumes: start\n");
9946 if (list_empty(&ioc->raid_device_list))
9950 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9951 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9952 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9953 MPI2_IOCSTATUS_MASK;
9954 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9956 handle = le16_to_cpu(volume_pg1.DevHandle);
9958 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9959 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9960 sizeof(Mpi2RaidVolPage0_t)))
9963 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9964 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9965 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
9966 _scsih_mark_responding_raid_device(ioc,
9967 le64_to_cpu(volume_pg1.WWID), handle);
9970 /* refresh the pd_handles */
9971 if (!ioc->is_warpdrive) {
9972 phys_disk_num = 0xFF;
9973 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
9974 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9975 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9977 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9978 MPI2_IOCSTATUS_MASK;
9979 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9981 phys_disk_num = pd_pg0.PhysDiskNum;
9982 handle = le16_to_cpu(pd_pg0.DevHandle);
9983 set_bit(handle, ioc->pd_handles);
9987 ioc_info(ioc, "search for responding raid volumes: complete\n");
9991 * _scsih_mark_responding_expander - mark a expander as responding
9992 * @ioc: per adapter object
9993 * @expander_pg0:SAS Expander Config Page0
9995 * After host reset, find out whether devices are still responding.
9996 * Used in _scsih_remove_unresponsive_expanders.
/*
 * Matches Expander Page 0 against the cached sas_expander_list by SAS
 * address and hba_port, marks the match responding, refreshes enclosure
 * fields, and propagates a changed handle to every cached phy.
 * NOTE(review): sparse extract — continue/break lines and closing braces
 * are missing from this view.
 */
9999 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10000 Mpi2ExpanderPage0_t *expander_pg0)
10002 struct _sas_node *sas_expander = NULL;
10003 unsigned long flags;
10005 struct _enclosure_node *enclosure_dev = NULL;
10006 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10007 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10008 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10009 struct hba_port *port = mpt3sas_get_port_by_id(
10010 ioc, expander_pg0->PhysicalPort, 0);
10012 if (enclosure_handle)
10014 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10017 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10018 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
/* match on SAS address + port */
10019 if (sas_expander->sas_address != sas_address)
10021 if (sas_expander->port != port)
10023 sas_expander->responding = 1;
10025 if (enclosure_dev) {
10026 sas_expander->enclosure_logical_id =
10027 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10028 sas_expander->enclosure_handle =
10029 le16_to_cpu(expander_pg0->EnclosureHandle);
10032 if (sas_expander->handle == handle)
10034 pr_info("\texpander(0x%016llx): handle changed" \
10035 " from(0x%04x) to (0x%04x)!!!\n",
10036 (unsigned long long)sas_expander->sas_address,
10037 sas_expander->handle, handle);
10038 sas_expander->handle = handle;
/* every cached phy carries the expander handle — update them all */
10039 for (i = 0 ; i < sas_expander->num_phys ; i++)
10040 sas_expander->phy[i].handle = handle;
10044 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10048 * _scsih_search_responding_expanders -
10049 * @ioc: per adapter object
10051 * After host reset, find out whether devices are still responding.
/*
 * Walks Expander Page 0 with GET_NEXT_HNDL; each expander found is logged
 * and marked responding via _scsih_mark_responding_expander(). No-op when
 * the cached expander list is empty.
 * NOTE(review): sparse extract — declarations and loop-closing lines are
 * missing from this view.
 */
10055 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10057 Mpi2ExpanderPage0_t expander_pg0;
10058 Mpi2ConfigReply_t mpi_reply;
10064 ioc_info(ioc, "search for expanders: start\n");
10066 if (list_empty(&ioc->sas_expander_list))
10070 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10071 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10073 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10074 MPI2_IOCSTATUS_MASK;
10075 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10078 handle = le16_to_cpu(expander_pg0.DevHandle);
10079 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10080 port = expander_pg0.PhysicalPort;
/* port id is only meaningful with multipath enabled */
10082 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10083 handle, (unsigned long long)sas_address,
10084 (ioc->multipath_on_hba ?
10085 port : MULTIPATH_DISABLED_PORT_ID));
10086 _scsih_mark_responding_expander(ioc, &expander_pg0);
10090 ioc_info(ioc, "search for expanders: complete\n");
10094 * _scsih_remove_unresponding_devices - removing unresponding devices
10095 * @ioc: per adapter object
/*
 * Post-reset cleanup: prunes every SAS end device, PCIe device, RAID
 * volume and expander whose `responding` flag was not set by the preceding
 * search pass, then clears the flag on survivors and unblocks all I/O.
 * Non-responding SAS/PCIe devices are moved to a private list under the
 * lock and torn down outside it; the pruning list owns the reference for
 * each moved object.
 * NOTE(review): sparse extract — the declaration/INIT of the first `head`
 * list and some braces are missing from this view.
 */
10098 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10100 struct _sas_device *sas_device, *sas_device_next;
10101 struct _sas_node *sas_expander, *sas_expander_next;
10102 struct _raid_device *raid_device, *raid_device_next;
10103 struct _pcie_device *pcie_device, *pcie_device_next;
10104 struct list_head tmp_list;
10105 unsigned long flags;
10108 ioc_info(ioc, "removing unresponding devices: start\n");
10110 /* removing unresponding end devices */
10111 ioc_info(ioc, "removing unresponding devices: end-devices\n");
10113 * Iterate, pulling off devices marked as non-responding. We become the
10114 * owner for the reference the list had on any object we prune.
10116 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10117 list_for_each_entry_safe(sas_device, sas_device_next,
10118 &ioc->sas_device_list, list) {
10119 if (!sas_device->responding)
10120 list_move_tail(&sas_device->list, &head);
10122 sas_device->responding = 0;
10124 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10127 * Now, uninitialize and remove the unresponding devices we pruned.
10129 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10130 _scsih_remove_device(ioc, sas_device);
10131 list_del_init(&sas_device->list);
/* drop the reference the pruned list held */
10132 sas_device_put(sas_device);
10135 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
10136 INIT_LIST_HEAD(&head);
10137 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10138 list_for_each_entry_safe(pcie_device, pcie_device_next,
10139 &ioc->pcie_device_list, list) {
10140 if (!pcie_device->responding)
10141 list_move_tail(&pcie_device->list, &head);
10143 pcie_device->responding = 0;
10145 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10147 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10148 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10149 list_del_init(&pcie_device->list);
10150 pcie_device_put(pcie_device);
10153 /* removing unresponding volumes */
10154 if (ioc->ir_firmware) {
10155 ioc_info(ioc, "removing unresponding devices: volumes\n");
10156 list_for_each_entry_safe(raid_device, raid_device_next,
10157 &ioc->raid_device_list, list) {
10158 if (!raid_device->responding)
10159 _scsih_sas_volume_delete(ioc,
10160 raid_device->handle);
10162 raid_device->responding = 0;
10166 /* removing unresponding expanders */
10167 ioc_info(ioc, "removing unresponding devices: expanders\n");
10168 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10169 INIT_LIST_HEAD(&tmp_list);
10170 list_for_each_entry_safe(sas_expander, sas_expander_next,
10171 &ioc->sas_expander_list, list) {
10172 if (!sas_expander->responding)
10173 list_move_tail(&sas_expander->list, &tmp_list);
10175 sas_expander->responding = 0;
10177 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10178 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10180 _scsih_expander_node_remove(ioc, sas_expander);
10183 ioc_info(ioc, "removing unresponding devices: complete\n");
10185 /* unblock devices */
10186 _scsih_ublock_io_all_device(ioc);
/*
 * _scsih_refresh_expander_links - refresh transport links for one expander
 * Reads Expander Page 1 for each phy of @sas_expander (addressed by
 * @handle) and pushes the attached handle and negotiated link rate to the
 * SAS transport layer.
 * NOTE(review): sparse extract — opening brace, loop variable declaration
 * and closing braces are missing from this view.
 */
10190 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10191 struct _sas_node *sas_expander, u16 handle)
10193 Mpi2ExpanderPage1_t expander_pg1;
10194 Mpi2ConfigReply_t mpi_reply;
10197 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10198 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10199 &expander_pg1, i, handle))) {
10200 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10201 __FILE__, __LINE__, __func__);
/* high nibble of NegotiatedLinkRate carries the logical rate */
10205 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10206 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10207 expander_pg1.NegotiatedLinkRate >> 4,
10208 sas_expander->port);
10213 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10214 * @ioc: per adapter object
/*
 * Full topology rescan run after a host reset, in five phases:
 *   1. expanders      — refresh links for known ones, add new ones;
 *   2. IR phys disks  — (IR firmware only) track in pd_handles and add;
 *   3. IR volumes     — add healthy volumes via a synthesized
 *                       MPI2_EVENT_IR_CHANGE_RC_ADDED config element;
 *   4. SAS end devices — add any not already cached by SAS address;
 *   5. PCIe end devices — add any not already cached by WWID.
 * Each phase iterates the corresponding config page with a GET_NEXT form
 * until IOCStatus is no longer SUCCESS.
 * NOTE(review): sparse extract — embedded original line numbers are
 * non-contiguous; loop braces, continue/break/goto lines and some
 * declarations are missing from this view. The `static` qualifier on the
 * local `raid_device` pointer mirrors the event handler above and is
 * worth confirming against upstream intent.
 */
10217 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10219 Mpi2ExpanderPage0_t expander_pg0;
10220 Mpi2SasDevicePage0_t sas_device_pg0;
10221 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10222 Mpi2RaidVolPage1_t volume_pg1;
10223 Mpi2RaidVolPage0_t volume_pg0;
10224 Mpi2RaidPhysDiskPage0_t pd_pg0;
10225 Mpi2EventIrConfigElement_t element;
10226 Mpi2ConfigReply_t mpi_reply;
10227 u8 phys_disk_num, port_id;
10229 u16 handle, parent_handle;
10231 struct _sas_device *sas_device;
10232 struct _pcie_device *pcie_device;
10233 struct _sas_node *expander_device;
10234 static struct _raid_device *raid_device;
10236 unsigned long flags;
10238 ioc_info(ioc, "scan devices: start\n");
10240 _scsih_sas_host_refresh(ioc);
/* phase 1: expanders */
10242 ioc_info(ioc, "\tscan devices: expanders start\n");
10246 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10247 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10248 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10249 MPI2_IOCSTATUS_MASK;
10250 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10251 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10252 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10255 handle = le16_to_cpu(expander_pg0.DevHandle);
10256 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10257 port_id = expander_pg0.PhysicalPort;
10258 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10259 ioc, le64_to_cpu(expander_pg0.SASAddress),
10260 mpt3sas_get_port_by_id(ioc, port_id, 0));
10261 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* known expander: refresh its links; unknown: add it fresh */
10262 if (expander_device)
10263 _scsih_refresh_expander_links(ioc, expander_device,
10266 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10268 (u64)le64_to_cpu(expander_pg0.SASAddress));
10269 _scsih_expander_add(ioc, handle);
10270 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10272 (u64)le64_to_cpu(expander_pg0.SASAddress));
10276 ioc_info(ioc, "\tscan devices: expanders complete\n");
/* phases 2 and 3 apply only to IR (integrated RAID) firmware */
10278 if (!ioc->ir_firmware)
10281 ioc_info(ioc, "\tscan devices: phys disk start\n");
10284 phys_disk_num = 0xFF;
10285 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10286 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10288 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10289 MPI2_IOCSTATUS_MASK;
10290 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10291 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10292 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10295 phys_disk_num = pd_pg0.PhysDiskNum;
10296 handle = le16_to_cpu(pd_pg0.DevHandle);
/* already cached — drop the lookup reference and move on */
10297 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10299 sas_device_put(sas_device);
10302 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10303 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10306 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10307 MPI2_IOCSTATUS_MASK;
10308 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10309 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10310 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10313 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10314 if (!_scsih_get_sas_address(ioc, parent_handle,
10316 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10318 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10319 port_id = sas_device_pg0.PhysicalPort;
10320 mpt3sas_transport_update_links(ioc, sas_address,
10321 handle, sas_device_pg0.PhyNum,
10322 MPI2_SAS_NEG_LINK_RATE_1_5,
10323 mpt3sas_get_port_by_id(ioc, port_id, 0));
10324 set_bit(handle, ioc->pd_handles);
10326 /* This will retry adding the end device.
10327 * _scsih_add_device() will decide on retries and
10328 * return "1" when it should be retried
10330 while (_scsih_add_device(ioc, handle, retry_count++,
10334 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10336 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10340 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10342 ioc_info(ioc, "\tscan devices: volumes start\n");
10346 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10347 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10348 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10349 MPI2_IOCSTATUS_MASK;
10350 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10351 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10352 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10355 handle = le16_to_cpu(volume_pg1.DevHandle);
10356 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10357 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10358 le64_to_cpu(volume_pg1.WWID));
10359 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10362 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10363 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10364 sizeof(Mpi2RaidVolPage0_t)))
10366 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10367 MPI2_IOCSTATUS_MASK;
10368 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10369 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10370 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10373 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10374 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10375 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
/* reuse the event-driven add path by faking an ADDED element */
10376 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10377 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10378 element.VolDevHandle = volume_pg1.DevHandle;
10379 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10380 volume_pg1.DevHandle);
10381 _scsih_sas_volume_add(ioc, &element);
10382 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10383 volume_pg1.DevHandle);
10387 ioc_info(ioc, "\tscan devices: volumes complete\n");
/* phase 4: SAS end devices */
10391 ioc_info(ioc, "\tscan devices: end devices start\n");
10395 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10396 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10398 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10399 MPI2_IOCSTATUS_MASK;
10400 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10401 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10402 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10405 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10406 if (!(_scsih_is_end_device(
10407 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10409 port_id = sas_device_pg0.PhysicalPort;
10410 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10411 le64_to_cpu(sas_device_pg0.SASAddress),
10412 mpt3sas_get_port_by_id(ioc, port_id, 0));
10414 sas_device_put(sas_device);
10417 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10418 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10419 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10421 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10422 mpt3sas_transport_update_links(ioc, sas_address, handle,
10423 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10424 mpt3sas_get_port_by_id(ioc, port_id, 0));
10426 /* This will retry adding the end device.
10427 * _scsih_add_device() will decide on retries and
10428 * return "1" when it should be retried
10430 while (_scsih_add_device(ioc, handle, retry_count++,
10434 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10436 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10439 ioc_info(ioc, "\tscan devices: end devices complete\n");
/* phase 5: PCIe (NVMe) end devices */
10440 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10444 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10445 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10447 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10448 & MPI2_IOCSTATUS_MASK;
10449 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10450 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10451 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10454 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10455 if (!(_scsih_is_nvme_pciescsi_device(
10456 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10458 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10459 le64_to_cpu(pcie_device_pg0.WWID));
10461 pcie_device_put(pcie_device);
10465 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10466 _scsih_pcie_add_device(ioc, handle);
10468 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10469 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10471 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10472 ioc_info(ioc, "scan devices: complete\n");
10476 * mpt3sas_scsih_pre_reset_handler - pre reset callback handler (for scsih)
10477 * @ioc: per adapter object
10479 * The handler for doing any required cleanup or initialization.
10481 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
/* Only logs entry (when task-management debug is enabled); no driver state
 * is touched before the reset actually proceeds.
 */
10483 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10487 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10489 * @ioc: per adapter object
10491 * The handler for doing any required cleanup or initialization.
10494 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10497 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
/* Terminate an in-flight internal scsih command: mark it reset-killed,
 * release its message frame and wake the waiter.
 */
10498 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10499 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10500 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10501 complete(&ioc->scsih_cmds.done);
/* Same treatment for an in-flight task-management command. */
10503 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10504 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10505 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10506 complete(&ioc->tm_cmds.done);
/* Reset invalidates pending device add/remove bookkeeping, queued firmware
 * events and any running commands, so drop them all.
 */
10509 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10510 memset(ioc->device_remove_in_progress, 0,
10511 ioc->device_remove_in_progress_sz);
10512 _scsih_fw_event_cleanup_queue(ioc);
10513 _scsih_flush_running_cmds(ioc);
10517 * mpt3sas_scsih_reset_done_handler - reset done callback handler (for scsih)
10518 * @ioc: per adapter object
10520 * The handler for doing any required cleanup or initialization.
10523 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10525 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
/* Re-sync driver state with firmware after reset, unless the driver is
 * still in its initial load or discovery was disabled on an HBA that
 * reported no phys.
 */
10526 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
10527 !ioc->sas_hba.num_phys)) {
10528 if (ioc->multipath_on_hba) {
10529 _scsih_sas_port_refresh(ioc);
10530 _scsih_update_vphys_after_reset(ioc);
/* Re-validate every previously-known device class, then delete the
 * ones that did not respond after the reset.
 */
10532 _scsih_prep_device_scan(ioc);
10533 _scsih_create_enclosure_list_after_reset(ioc);
10534 _scsih_search_responding_sas_devices(ioc);
10535 _scsih_search_responding_pcie_devices(ioc);
10536 _scsih_search_responding_raid_devices(ioc);
10537 _scsih_search_responding_expanders(ioc);
10538 _scsih_error_recovery_delete_devices(ioc);
10543 * _mpt3sas_fw_work - delayed task for processing firmware events
10544 * @ioc: per adapter object
10545 * @fw_event: The fw_event_work object
10549 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
/* Record the event being serviced so cleanup paths can see it. */
10551 ioc->current_event = fw_event;
10552 _scsih_fw_event_del_from_list(ioc, fw_event);
10554 /* the queue is being flushed so ignore this event */
10555 if (ioc->remove_host || ioc->pci_error_recovery) {
10556 fw_event_work_put(fw_event);
10557 ioc->current_event = NULL;
/* Dispatch on the event code; most handlers parse fw_event->event_data. */
10561 switch (fw_event->event) {
10562 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10563 mpt3sas_process_trigger_data(ioc,
10564 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10565 fw_event->event_data);
10567 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
/* Wait out any in-progress host recovery before pruning devices. */
10568 while (scsi_host_in_recovery(ioc->shost) ||
10569 ioc->shost_recovery) {
10571 * If we're unloading or cancelling the work, bail.
10572 * Otherwise, this can become an infinite loop.
10574 if (ioc->remove_host || ioc->fw_events_cleanup)
10578 _scsih_remove_unresponding_devices(ioc);
10579 _scsih_del_dirty_vphy(ioc);
10580 _scsih_del_dirty_port_entries(ioc);
10581 _scsih_scan_for_devices_after_reset(ioc);
10582 _scsih_set_nvme_max_shutdown_latency(ioc);
10584 case MPT3SAS_PORT_ENABLE_COMPLETE:
10585 ioc->start_scan = 0;
/* Apply module-parameter missing-delay values once port enable is done. */
10586 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10587 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10590 ioc_info(ioc, "port enable: complete from worker thread\n"));
10592 case MPT3SAS_TURN_ON_PFA_LED:
10593 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10595 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10596 _scsih_sas_topology_change_event(ioc, fw_event);
10598 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
/* Debug-only dump; the real handling occurred at ISR time. */
10599 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10600 _scsih_sas_device_status_change_event_debug(ioc,
10601 (Mpi2EventDataSasDeviceStatusChange_t *)
10602 fw_event->event_data);
10604 case MPI2_EVENT_SAS_DISCOVERY:
10605 _scsih_sas_discovery_event(ioc, fw_event);
10607 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10608 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10610 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10611 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10613 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10614 _scsih_sas_enclosure_dev_status_change_event(ioc,
10617 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10618 _scsih_sas_ir_config_change_event(ioc, fw_event);
10620 case MPI2_EVENT_IR_VOLUME:
10621 _scsih_sas_ir_volume_event(ioc, fw_event);
10623 case MPI2_EVENT_IR_PHYSICAL_DISK:
10624 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10626 case MPI2_EVENT_IR_OPERATION_STATUS:
10627 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10629 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10630 _scsih_pcie_device_status_change_event(ioc, fw_event);
10632 case MPI2_EVENT_PCIE_ENUMERATION:
10633 _scsih_pcie_enumeration_event(ioc, fw_event);
10635 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10636 _scsih_pcie_topology_change_event(ioc, fw_event);
10637 ioc->current_event = NULL;
/* Drop the list's reference now that the event has been consumed. */
10642 fw_event_work_put(fw_event);
10643 ioc->current_event = NULL;
10647 * _firmware_event_work
10648 * @work: The fw_event_work object
10651 * wrappers for the work thread handling firmware events
10655 _firmware_event_work(struct work_struct *work)
/* Recover the enclosing fw_event_work from its embedded work_struct and
 * hand it to the common event processor.
 */
10657 struct fw_event_work *fw_event = container_of(work,
10658 struct fw_event_work, work);
10660 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10664 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10665 * @ioc: per adapter object
10666 * @msix_index: MSIX table index supplied by the OS
10667 * @reply: reply message frame(lower 32bit addr)
10668 * Context: interrupt.
10670 * This function merely adds a new work task into ioc->firmware_event_thread.
10671 * The tasks are worked from _firmware_event_work in user context.
10673 * Return: 1 meaning mf should be freed from _base_interrupt
10674 * 0 means the mf is freed from this function.
10677 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10680 struct fw_event_work *fw_event;
10681 Mpi2EventNotificationReply_t *mpi_reply;
10684 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10686 /* events turned off due to host reset */
10687 if (ioc->pci_error_recovery)
10690 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10692 if (unlikely(!mpi_reply)) {
10693 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10694 __FILE__, __LINE__, __func__);
10698 event = le16_to_cpu(mpi_reply->Event);
/* Feed the diag-trigger machinery for everything but log-entry events. */
10700 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10701 mpt3sas_trigger_event(ioc, event, 0);
/* A few events need (partial) handling right here in interrupt context,
 * before a work item is queued; the rest are handled solely in the worker.
 */
10705 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10707 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10708 (Mpi2EventDataSasBroadcastPrimitive_t *)
10709 mpi_reply->EventData;
10711 if (baen_data->Primitive !=
10712 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
/* Coalesce AENs that arrive while one is already being serviced. */
10715 if (ioc->broadcast_aen_busy) {
10716 ioc->broadcast_aen_pending++;
10719 ioc->broadcast_aen_busy = 1;
10723 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10724 _scsih_check_topo_delete_events(ioc,
10725 (Mpi2EventDataSasTopologyChangeList_t *)
10726 mpi_reply->EventData);
10728 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10729 _scsih_check_pcie_topo_remove_events(ioc,
10730 (Mpi26EventDataPCIeTopologyChangeList_t *)
10731 mpi_reply->EventData);
10733 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10734 _scsih_check_ir_config_unhide_events(ioc,
10735 (Mpi2EventDataIrConfigChangeList_t *)
10736 mpi_reply->EventData);
10738 case MPI2_EVENT_IR_VOLUME:
10739 _scsih_check_volume_delete_events(ioc,
10740 (Mpi2EventDataIrVolume_t *)
10741 mpi_reply->EventData);
10743 case MPI2_EVENT_LOG_ENTRY_ADDED:
10745 Mpi2EventDataLogEntryAdded_t *log_entry;
/* Log entries are only decoded for WarpDrive controllers. */
10748 if (!ioc->is_warpdrive)
10751 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10752 mpi_reply->EventData;
10753 log_code = (u32 *)log_entry->LogData;
10755 if (le16_to_cpu(log_entry->LogEntryQualifier)
10756 != MPT2_WARPDRIVE_LOGENTRY)
10759 switch (le32_to_cpu(*log_code)) {
10760 case MPT2_WARPDRIVE_LC_SSDT:
10761 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10763 case MPT2_WARPDRIVE_LC_SSDLW:
10764 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10766 case MPT2_WARPDRIVE_LC_SSDLF:
10767 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10769 case MPT2_WARPDRIVE_LC_BRMF:
10770 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10776 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10777 _scsih_sas_device_status_change_event(ioc,
10778 (Mpi2EventDataSasDeviceStatusChange_t *)
10779 mpi_reply->EventData);
/* These events require no ISR-time pre-processing; just queue them. */
10781 case MPI2_EVENT_IR_OPERATION_STATUS:
10782 case MPI2_EVENT_SAS_DISCOVERY:
10783 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10784 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10785 case MPI2_EVENT_IR_PHYSICAL_DISK:
10786 case MPI2_EVENT_PCIE_ENUMERATION:
10787 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10790 case MPI2_EVENT_TEMP_THRESHOLD:
10791 _scsih_temp_threshold_events(ioc,
10792 (Mpi2EventDataTemperature_t *)
10793 mpi_reply->EventData);
10795 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10796 ActiveCableEventData =
10797 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10798 switch (ActiveCableEventData->ReasonCode) {
10799 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10800 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10801 ActiveCableEventData->ReceptacleID);
10802 pr_notice("cannot be powered and devices connected\n");
10803 pr_notice("to this active cable will not be seen\n");
10804 pr_notice("This active cable requires %d mW of power\n",
10805 ActiveCableEventData->ActiveCablePowerRequirement);
10808 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10809 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10810 ActiveCableEventData->ReceptacleID);
10812 "is not running at optimal speed(12 Gb/s rate)\n");
10818 default: /* ignore the rest */
/* Copy the variable-length event data into a work item and queue it for
 * _firmware_event_work; the work item holds its own reference.
 */
10822 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
10823 fw_event = alloc_fw_event_work(sz);
10825 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10826 __FILE__, __LINE__, __func__);
10830 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
10831 fw_event->ioc = ioc;
10832 fw_event->VF_ID = mpi_reply->VF_ID;
10833 fw_event->VP_ID = mpi_reply->VP_ID;
10834 fw_event->event = event;
10835 _scsih_fw_event_add(ioc, fw_event);
10836 fw_event_work_put(fw_event);
10841 * _scsih_expander_node_remove - removing expander device from list.
10842 * @ioc: per adapter object
10843 * @sas_expander: the sas_device object
10845 * Removing object and freeing associated memory from the
10846 * ioc->sas_expander_list.
10849 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
10850 struct _sas_node *sas_expander)
10852 struct _sas_port *mpt3sas_port, *next;
10853 unsigned long flags;
10855 /* remove sibling ports attached to this expander */
10856 list_for_each_entry_safe(mpt3sas_port, next,
10857 &sas_expander->sas_port_list, port_list) {
10858 if (ioc->shost_recovery)
/* End devices are removed directly; attached expanders recurse via
 * mpt3sas_expander_remove().
 */
10860 if (mpt3sas_port->remote_identify.device_type ==
10862 mpt3sas_device_remove_by_sas_address(ioc,
10863 mpt3sas_port->remote_identify.sas_address,
10864 mpt3sas_port->hba_port);
10865 else if (mpt3sas_port->remote_identify.device_type ==
10866 SAS_EDGE_EXPANDER_DEVICE ||
10867 mpt3sas_port->remote_identify.device_type ==
10868 SAS_FANOUT_EXPANDER_DEVICE)
10869 mpt3sas_expander_remove(ioc,
10870 mpt3sas_port->remote_identify.sas_address,
10871 mpt3sas_port->hba_port);
/* Detach this expander's own port from the SAS transport layer. */
10874 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
10875 sas_expander->sas_address_parent, sas_expander->port);
10878 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10879 sas_expander->handle, (unsigned long long)
10880 sas_expander->sas_address,
10881 sas_expander->port->port_id);
/* Unlink from ioc->sas_expander_list under the node lock, then free. */
10883 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10884 list_del(&sas_expander->list);
10885 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10887 kfree(sas_expander->phy);
10888 kfree(sas_expander);
10892 * _scsih_nvme_shutdown - NVMe shutdown notification
10893 * @ioc: per adapter object
10895 * Sending IoUnitControl request with shutdown operation code to alert IOC that
10896 * the host system is shutting down so that IOC can issue NVMe shutdown to
10897 * NVMe drives attached to it.
10900 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
10902 Mpi26IoUnitControlRequest_t *mpi_request;
10903 Mpi26IoUnitControlReply_t *mpi_reply;
10906 /* are there any NVMe devices ? */
10907 if (list_empty(&ioc->pcie_device_list))
/* Serialize use of the shared internal scsih command slot. */
10910 mutex_lock(&ioc->scsih_cmds.mutex);
10912 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10913 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10917 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10919 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10922 "%s: failed obtaining a smid\n", __func__);
10923 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
/* Build the IO Unit Control SHUTDOWN request in the message frame. */
10927 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10928 ioc->scsih_cmds.smid = smid;
10929 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
10930 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
10931 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
10933 init_completion(&ioc->scsih_cmds.done);
10934 ioc->put_smid_default(ioc, smid);
10935 /* Wait for max_shutdown_latency seconds */
10937 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
10938 ioc->max_shutdown_latency);
10939 wait_for_completion_timeout(&ioc->scsih_cmds.done,
10940 ioc->max_shutdown_latency*HZ);
10942 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
10943 ioc_err(ioc, "%s: timeout\n", __func__);
10947 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
10948 mpi_reply = ioc->scsih_cmds.reply;
10949 ioc_info(ioc, "Io Unit Control shutdown (complete):"
10950 "ioc_status(0x%04x), loginfo(0x%08x)\n",
10951 le16_to_cpu(mpi_reply->IOCStatus),
10952 le32_to_cpu(mpi_reply->IOCLogInfo));
/* Release the command slot for the next internal user. */
10955 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10956 mutex_unlock(&ioc->scsih_cmds.mutex);
10961 * _scsih_ir_shutdown - IR shutdown notification
10962 * @ioc: per adapter object
10964 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
10965 * the host system is shutting down.
10968 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
10970 Mpi2RaidActionRequest_t *mpi_request;
10971 Mpi2RaidActionReply_t *mpi_reply;
10974 /* is IR firmware build loaded ? */
10975 if (!ioc->ir_firmware)
10978 /* are there any volumes ? */
10979 if (list_empty(&ioc->raid_device_list))
/* Serialize use of the shared internal scsih command slot. */
10982 mutex_lock(&ioc->scsih_cmds.mutex);
10984 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10985 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10988 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10990 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10992 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
10993 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
/* Build the RAID Action "system shutdown initiated" request. */
10997 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10998 ioc->scsih_cmds.smid = smid;
10999 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11001 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11002 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
/* hide_ir_msg suppresses IR chatter (e.g. on WarpDrive personalities). */
11004 if (!ioc->hide_ir_msg)
11005 ioc_info(ioc, "IR shutdown (sending)\n");
11006 init_completion(&ioc->scsih_cmds.done);
11007 ioc->put_smid_default(ioc, smid);
11008 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11010 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11011 ioc_err(ioc, "%s: timeout\n", __func__);
11015 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11016 mpi_reply = ioc->scsih_cmds.reply;
11017 if (!ioc->hide_ir_msg)
11018 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11019 le16_to_cpu(mpi_reply->IOCStatus),
11020 le32_to_cpu(mpi_reply->IOCLogInfo));
/* Release the command slot for the next internal user. */
11024 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11025 mutex_unlock(&ioc->scsih_cmds.mutex);
11029 * _scsih_get_shost_and_ioc - get shost and ioc
11030 * and verify whether they are NULL or not
11031 * @pdev: PCI device struct
11032 * @shost: address of scsi host pointer
11033 * @ioc: address of HBA adapter pointer
11035 * Return zero if *shost and *ioc are not NULL otherwise return error number.
11038 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11039 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
/* Both pointers come from PCI driver data; either may be NULL if probe
 * never completed or the device is being torn down.
 */
11041 *shost = pci_get_drvdata(pdev);
11042 if (*shost == NULL) {
11043 dev_err(&pdev->dev, "pdev's driver data is null\n");
11047 *ioc = shost_priv(*shost);
11048 if (*ioc == NULL) {
11049 dev_err(&pdev->dev, "shost's private data is null\n");
11057 * scsih_remove - detach and remove add host
11058 * @pdev: PCI device struct
11060 * Routine called when unloading the driver.
11062 static void scsih_remove(struct pci_dev *pdev)
11064 struct Scsi_Host *shost;
11065 struct MPT3SAS_ADAPTER *ioc;
11066 struct _sas_port *mpt3sas_port, *next_port;
11067 struct _raid_device *raid_device, *next;
11068 struct MPT3SAS_TARGET *sas_target_priv_data;
11069 struct _pcie_device *pcie_device, *pcienext;
11070 struct workqueue_struct *wq;
11071 unsigned long flags;
11072 Mpi2ConfigReply_t mpi_reply;
11073 struct hba_port *port, *port_next;
11075 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
/* Flag removal first so event/work paths stop processing. */
11078 ioc->remove_host = 1;
/* If the PCI device is already gone (surprise removal), fail all
 * outstanding commands instead of waiting for hardware completions.
 */
11080 if (!pci_device_is_present(pdev))
11081 _scsih_flush_running_cmds(ioc);
11083 _scsih_fw_event_cleanup_queue(ioc);
/* Detach the firmware-event workqueue under the lock, destroy outside. */
11085 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11086 wq = ioc->firmware_event_thread;
11087 ioc->firmware_event_thread = NULL;
11088 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11090 destroy_workqueue(wq);
11092 * Copy back the unmodified ioc page1. so that on next driver load,
11093 * current modified changes on ioc page1 won't take effect.
11095 if (ioc->is_aero_ioc)
11096 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11097 &ioc->ioc_pg1_copy);
11098 /* release all the volumes */
11099 _scsih_ir_shutdown(ioc);
11100 mpt3sas_destroy_debugfs(ioc);
11101 sas_remove_host(shost);
11102 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11104 if (raid_device->starget) {
11105 sas_target_priv_data =
11106 raid_device->starget->hostdata;
11107 sas_target_priv_data->deleted = 1;
11108 scsi_remove_target(&raid_device->starget->dev);
11110 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11111 raid_device->handle, (u64)raid_device->wwid);
11112 _scsih_raid_device_remove(ioc, raid_device);
11114 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11116 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11117 list_del_init(&pcie_device->list);
11118 pcie_device_put(pcie_device);
11121 /* free ports attached to the sas_host */
11122 list_for_each_entry_safe(mpt3sas_port, next_port,
11123 &ioc->sas_hba.sas_port_list, port_list) {
11124 if (mpt3sas_port->remote_identify.device_type ==
11126 mpt3sas_device_remove_by_sas_address(ioc,
11127 mpt3sas_port->remote_identify.sas_address,
11128 mpt3sas_port->hba_port);
11129 else if (mpt3sas_port->remote_identify.device_type ==
11130 SAS_EDGE_EXPANDER_DEVICE ||
11131 mpt3sas_port->remote_identify.device_type ==
11132 SAS_FANOUT_EXPANDER_DEVICE)
11133 mpt3sas_expander_remove(ioc,
11134 mpt3sas_port->remote_identify.sas_address,
11135 mpt3sas_port->hba_port);
11138 list_for_each_entry_safe(port, port_next,
11139 &ioc->port_table_list, list) {
11140 list_del(&port->list);
11144 /* free phys attached to the sas_host */
11145 if (ioc->sas_hba.num_phys) {
11146 kfree(ioc->sas_hba.phy);
11147 ioc->sas_hba.phy = NULL;
11148 ioc->sas_hba.num_phys = 0;
/* Finally tear down the base layer, unlink from the global ioc list
 * and drop the Scsi_Host reference taken at probe time.
 */
11151 mpt3sas_base_detach(ioc);
11152 spin_lock(&gioc_lock);
11153 list_del(&ioc->list);
11154 spin_unlock(&gioc_lock);
11155 scsi_host_put(shost);
11159 * scsih_shutdown - routine call during system shutdown
11160 * @pdev: PCI device struct
11163 scsih_shutdown(struct pci_dev *pdev)
11165 struct Scsi_Host *shost;
11166 struct MPT3SAS_ADAPTER *ioc;
11167 struct workqueue_struct *wq;
11168 unsigned long flags;
11169 Mpi2ConfigReply_t mpi_reply;
11171 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
/* Same front half as scsih_remove(): stop events and the workqueue. */
11174 ioc->remove_host = 1;
11176 if (!pci_device_is_present(pdev))
11177 _scsih_flush_running_cmds(ioc);
11179 _scsih_fw_event_cleanup_queue(ioc);
11181 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11182 wq = ioc->firmware_event_thread;
11183 ioc->firmware_event_thread = NULL;
11184 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11186 destroy_workqueue(wq);
11188 * Copy back the unmodified ioc page1 so that on next driver load,
11189 * current modified changes on ioc page1 won't take effect.
11191 if (ioc->is_aero_ioc)
11192 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11193 &ioc->ioc_pg1_copy);
/* Notify IR volumes and NVMe drives of the impending power-off, then
 * detach the base layer.
 */
11195 _scsih_ir_shutdown(ioc);
11196 _scsih_nvme_shutdown(ioc);
11197 mpt3sas_base_detach(ioc);
11202 * _scsih_probe_boot_devices - reports 1st device
11203 * @ioc: per adapter object
11205 * If specified in bios page 2, this routine reports the 1st
11206 * device scsi-ml or sas transport for persistent boot device
11207 * purposes. Please refer to function _scsih_determine_boot_device()
11210 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11214 struct _sas_device *sas_device;
11215 struct _raid_device *raid_device;
11216 struct _pcie_device *pcie_device;
11218 u64 sas_address_parent;
11220 unsigned long flags;
11223 struct hba_port *port;
11225 /* no Bios, return immediately */
11226 if (!ioc->bios_pg3.BiosVersion)
/* Boot device preference order: requested, alternate, then current. */
11230 if (ioc->req_boot_device.device) {
11231 device = ioc->req_boot_device.device;
11232 channel = ioc->req_boot_device.channel;
11233 } else if (ioc->req_alt_boot_device.device) {
11234 device = ioc->req_alt_boot_device.device;
11235 channel = ioc->req_alt_boot_device.channel;
11236 } else if (ioc->current_boot_device.device) {
11237 device = ioc->current_boot_device.device;
11238 channel = ioc->current_boot_device.channel;
/* The channel tells us which union member "device" actually is. */
11244 if (channel == RAID_CHANNEL) {
11245 raid_device = device;
11246 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11247 raid_device->id, 0);
11249 _scsih_raid_device_remove(ioc, raid_device);
11250 } else if (channel == PCIE_CHANNEL) {
11251 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11252 pcie_device = device;
11253 tid = pcie_device->id;
11254 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11255 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11256 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11258 _scsih_pcie_device_remove(ioc, pcie_device);
/* Default: a SAS end device — snapshot fields under the lock, then
 * register with the SAS transport outside it.
 */
11260 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11261 sas_device = device;
11262 handle = sas_device->handle;
11263 sas_address_parent = sas_device->sas_address_parent;
11264 sas_address = sas_device->sas_address;
11265 port = sas_device->port;
11266 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11267 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11269 if (ioc->hide_drives)
11275 if (!mpt3sas_transport_port_add(ioc, handle,
11276 sas_address_parent, port)) {
11277 _scsih_sas_device_remove(ioc, sas_device);
11278 } else if (!sas_device->starget) {
11279 if (!ioc->is_driver_loading) {
11280 mpt3sas_transport_port_remove(ioc,
11282 sas_address_parent, port);
11283 _scsih_sas_device_remove(ioc, sas_device);
11290 * _scsih_probe_raid - reporting raid volumes to scsi-ml
11291 * @ioc: per adapter object
11293 * Called during initial loading of the driver.
11296 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11298 struct _raid_device *raid_device, *raid_next;
/* Register each not-yet-registered volume; drop volumes scsi-ml rejects. */
11301 list_for_each_entry_safe(raid_device, raid_next,
11302 &ioc->raid_device_list, list) {
11303 if (raid_device->starget)
11305 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11306 raid_device->id, 0);
11308 _scsih_raid_device_remove(ioc, raid_device);
/* Pop (with a reference) the first device on sas_device_init_list, or NULL
 * when the list is empty; used by _scsih_probe_sas() as a drain loop.
 */
11312 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11314 struct _sas_device *sas_device = NULL;
11315 unsigned long flags;
11317 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11318 if (!list_empty(&ioc->sas_device_init_list)) {
11319 sas_device = list_first_entry(&ioc->sas_device_init_list,
11320 struct _sas_device, list);
/* Take a reference for the caller before dropping the lock. */
11321 sas_device_get(sas_device);
11323 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* Move a freshly-registered device onto ioc->sas_device_list, carefully
 * rebalancing list references since the lock was dropped during port_add.
 */
11328 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11329 struct _sas_device *sas_device)
11331 unsigned long flags;
11333 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11336 * Since we dropped the lock during the call to port_add(), we need to
11337 * be careful here that somebody else didn't move or delete this item
11338 * while we were busy with other things.
11340 * If it was on the list, we need a put() for the reference the list
11341 * had. Either way, we need a get() for the destination list.
11343 if (!list_empty(&sas_device->list)) {
11344 list_del_init(&sas_device->list);
11345 sas_device_put(sas_device);
11348 sas_device_get(sas_device);
11349 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11351 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11355 * _scsih_probe_sas - reporting sas devices to sas transport
11356 * @ioc: per adapter object
11358 * Called during initial loading of the driver.
11361 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11363 struct _sas_device *sas_device;
/* hide_drives (WarpDrive mode) suppresses exposing physical members. */
11365 if (ioc->hide_drives)
/* Drain the init list, registering each device with the SAS transport. */
11368 while ((sas_device = get_next_sas_device(ioc))) {
11369 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11370 sas_device->sas_address_parent, sas_device->port)) {
11371 _scsih_sas_device_remove(ioc, sas_device);
11372 sas_device_put(sas_device);
11374 } else if (!sas_device->starget) {
11376 * When asyn scanning is enabled, its not possible to
11377 * remove devices while scanning is turned on due to an
11378 * oops in scsi_sysfs_add_sdev()->add_device()->
11379 * sysfs_addrm_start()
11381 if (!ioc->is_driver_loading) {
11382 mpt3sas_transport_port_remove(ioc,
11383 sas_device->sas_address,
11384 sas_device->sas_address_parent,
11386 _scsih_sas_device_remove(ioc, sas_device);
11387 sas_device_put(sas_device);
/* Success: promote the device to the active list. */
11391 sas_device_make_active(ioc, sas_device);
11392 sas_device_put(sas_device);
11397 * get_next_pcie_device - Get the next pcie device
11398 * @ioc: per adapter object
11400 * Get the next pcie device from pcie_device_init_list list.
11402 * Return: pcie device structure if pcie_device_init_list list is not empty
11403 * otherwise returns NULL
11405 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11407 struct _pcie_device *pcie_device = NULL;
11408 unsigned long flags;
11410 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11411 if (!list_empty(&ioc->pcie_device_init_list)) {
11412 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11413 struct _pcie_device, list);
/* Take a reference for the caller before dropping the lock. */
11414 pcie_device_get(pcie_device);
11416 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11418 return pcie_device;
11422 * pcie_device_make_active - Add pcie device to pcie_device_list list
11423 * @ioc: per adapter object
11424 * @pcie_device: pcie device object
11426 * Add the pcie device which has registered with SCSI Transport Later to
11427 * pcie_device_list list
11429 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11430 struct _pcie_device *pcie_device)
11432 unsigned long flags;
11434 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
/* Drop the reference held by whichever list the device was on, then take
 * one for the destination list (mirrors sas_device_make_active()).
 */
11436 if (!list_empty(&pcie_device->list)) {
11437 list_del_init(&pcie_device->list);
11438 pcie_device_put(pcie_device);
11440 pcie_device_get(pcie_device);
11441 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11443 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11447 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11448 * @ioc: per adapter object
11450 * Called during initial loading of the driver.
11453 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11455 struct _pcie_device *pcie_device;
11458 /* PCIe Device List */
11459 while ((pcie_device = get_next_pcie_device(ioc))) {
/* Already registered with scsi-ml: nothing more to do. */
11460 if (pcie_device->starget) {
11461 pcie_device_put(pcie_device);
/* Blocked devices are kept on the active list but not exposed yet. */
11464 if (pcie_device->access_status ==
11465 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11466 pcie_device_make_active(ioc, pcie_device);
11467 pcie_device_put(pcie_device);
11470 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11471 pcie_device->id, 0);
11473 _scsih_pcie_device_remove(ioc, pcie_device);
11474 pcie_device_put(pcie_device);
11476 } else if (!pcie_device->starget) {
11478 * When async scanning is enabled, its not possible to
11479 * remove devices while scanning is turned on due to an
11480 * oops in scsi_sysfs_add_sdev()->add_device()->
11481 * sysfs_addrm_start()
11483 if (!ioc->is_driver_loading) {
11484 /* TODO-- Need to find out whether this condition will
11487 _scsih_pcie_device_remove(ioc, pcie_device);
11488 pcie_device_put(pcie_device);
/* Success: promote the device to the active list. */
11492 pcie_device_make_active(ioc, pcie_device);
11493 pcie_device_put(pcie_device);
11498 * _scsih_probe_devices - probing for devices
11499 * @ioc: per adapter object
11501 * Called during initial loading of the driver.
11504 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11506 u16 volume_mapping_flags;
11508 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11509 return; /* return when IOC doesn't support initiator mode */
11511 _scsih_probe_boot_devices(ioc);
/* With IR firmware, the IOC's volume-mapping mode dictates whether RAID
 * volumes or bare SAS devices get the lower target IDs, so probe in the
 * matching order.
 */
11513 if (ioc->ir_firmware) {
11514 volume_mapping_flags =
11515 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11516 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11517 if (volume_mapping_flags ==
11518 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11519 _scsih_probe_raid(ioc);
11520 _scsih_probe_sas(ioc);
11522 _scsih_probe_sas(ioc);
11523 _scsih_probe_raid(ioc);
11526 _scsih_probe_sas(ioc);
11527 _scsih_probe_pcie(ioc);
11532 * scsih_scan_start - scsi lld callback for .scan_start
11533 * @shost: SCSI host pointer
11535 * The shost has the ability to discover targets on its own instead
11536 * of scanning the entire bus. In our implemention, we will kick off
11537 * firmware discovery.
11540 scsih_scan_start(struct Scsi_Host *shost)
11542 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* Arm the host trace diag buffer, either per module parameter or per the
 * size configured in manufacturing page 11.
 */
11544 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11545 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11546 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11547 mpt3sas_enable_diag_buffer(ioc, 1);
11549 if (disable_discovery > 0)
/* start_scan is cleared by the PORT_ENABLE_COMPLETE event worker; the
 * scan is considered finished in scsih_scan_finished().
 */
11552 ioc->start_scan = 1;
11553 rc = mpt3sas_port_enable(ioc);
11556 ioc_info(ioc, "port enable: FAILED\n");
11560 * scsih_scan_finished - scsi lld callback for .scan_finished
11561 * @shost: SCSI host pointer
11562 * @time: elapsed time of the scan in jiffies
11564 * This function will be called periodically until it returns 1 with the
11565 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
11566 * we wait for firmware discovery to complete, then return 1.
11569 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11571 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* Discovery disabled via module parameter: report the scan as done. */
11573 if (disable_discovery > 0) {
11574 ioc->is_driver_loading = 0;
11575 ioc->wait_for_discovery_to_complete = 0;
/* Give port enable up to 300 seconds before declaring failure. */
11579 if (time >= (300 * HZ)) {
11580 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11581 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11582 ioc->is_driver_loading = 0;
/* start_scan still set => firmware discovery not finished yet. */
11586 if (ioc->start_scan)
/* Port enable completed with an error status from the firmware. */
11589 if (ioc->start_scan_failed) {
11590 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11591 ioc->start_scan_failed);
11592 ioc->is_driver_loading = 0;
11593 ioc->wait_for_discovery_to_complete = 0;
11594 ioc->remove_host = 1;
11598 ioc_info(ioc, "port enable: SUCCESS\n");
11599 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
/* Discovery succeeded: attach the devices found, then start watchdog. */
11601 if (ioc->wait_for_discovery_to_complete) {
11602 ioc->wait_for_discovery_to_complete = 0;
11603 _scsih_probe_devices(ioc);
11605 mpt3sas_base_start_watchdog(ioc);
11606 ioc->is_driver_loading = 0;
11611 * scsih_map_queues - map reply queues with request queues
11612 * @shost: SCSI host pointer
11614 static int scsih_map_queues(struct Scsi_Host *shost)
11616 struct MPT3SAS_ADAPTER *ioc =
11617 (struct MPT3SAS_ADAPTER *)shost->hostdata;
/* Single hardware queue: nothing to map. */
11619 if (ioc->shost->nr_hw_queues == 1)
/* Map blk-mq hw queues to the PCI device's MSI-X vectors, skipping the
 * vectors reserved for the high-IOPS queues. */
11622 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
11623 ioc->pdev, ioc->high_iops_queues);
11626 /* shost template for SAS 2.0 HBA devices */
11627 static struct scsi_host_template mpt2sas_driver_template = {
11628 .module = THIS_MODULE,
11629 .name = "Fusion MPT SAS Host",
11630 .proc_name = MPT2SAS_DRIVER_NAME,
11631 .queuecommand = scsih_qcmd,
11632 .target_alloc = scsih_target_alloc,
11633 .slave_alloc = scsih_slave_alloc,
11634 .slave_configure = scsih_slave_configure,
11635 .target_destroy = scsih_target_destroy,
11636 .slave_destroy = scsih_slave_destroy,
11637 .scan_finished = scsih_scan_finished,
11638 .scan_start = scsih_scan_start,
11639 .change_queue_depth = scsih_change_queue_depth,
11640 .eh_abort_handler = scsih_abort,
11641 .eh_device_reset_handler = scsih_dev_reset,
11642 .eh_target_reset_handler = scsih_target_reset,
11643 .eh_host_reset_handler = scsih_host_reset,
11644 .bios_param = scsih_bios_param,
/* SAS 2.0 generation uses the smaller MPT2SAS scatter-gather depth. */
11647 .sg_tablesize = MPT2SAS_SG_DEPTH,
11648 .max_sectors = 32767,
11650 .shost_attrs = mpt3sas_host_attrs,
11651 .sdev_attrs = mpt3sas_dev_attrs,
11652 .track_queue_depth = 1,
/* Per-command private data used to track SCSI I/O through the IOC. */
11653 .cmd_size = sizeof(struct scsiio_tracker),
11656 /* raid transport support for SAS 2.0 HBA devices */
11657 static struct raid_function_template mpt2sas_raid_functions = {
/* cookie ties this raid template to the matching shost template. */
11658 .cookie = &mpt2sas_driver_template,
11659 .is_raid = scsih_is_raid,
11660 .get_resync = scsih_get_resync,
11661 .get_state = scsih_get_state,
11664 /* shost template for SAS 3.0 HBA devices */
11665 static struct scsi_host_template mpt3sas_driver_template = {
11666 .module = THIS_MODULE,
11667 .name = "Fusion MPT SAS Host",
11668 .proc_name = MPT3SAS_DRIVER_NAME,
11669 .queuecommand = scsih_qcmd,
11670 .target_alloc = scsih_target_alloc,
11671 .slave_alloc = scsih_slave_alloc,
11672 .slave_configure = scsih_slave_configure,
11673 .target_destroy = scsih_target_destroy,
11674 .slave_destroy = scsih_slave_destroy,
11675 .scan_finished = scsih_scan_finished,
11676 .scan_start = scsih_scan_start,
11677 .change_queue_depth = scsih_change_queue_depth,
11678 .eh_abort_handler = scsih_abort,
11679 .eh_device_reset_handler = scsih_dev_reset,
11680 .eh_target_reset_handler = scsih_target_reset,
11681 .eh_host_reset_handler = scsih_host_reset,
11682 .bios_param = scsih_bios_param,
/* SAS 3.0 generation supports a larger SG depth than SAS 2.0. */
11685 .sg_tablesize = MPT3SAS_SG_DEPTH,
11686 .max_sectors = 32767,
11687 .max_segment_size = 0xffffffff,
11689 .shost_attrs = mpt3sas_host_attrs,
11690 .sdev_attrs = mpt3sas_dev_attrs,
11691 .track_queue_depth = 1,
11692 .cmd_size = sizeof(struct scsiio_tracker),
/* Only the SAS 3.0 template maps blk-mq queues to MSI-X vectors. */
11693 .map_queues = scsih_map_queues,
11696 /* raid transport support for SAS 3.0 HBA devices */
11697 static struct raid_function_template mpt3sas_raid_functions = {
/* cookie ties this raid template to the matching shost template. */
11698 .cookie = &mpt3sas_driver_template,
11699 .is_raid = scsih_is_raid,
11700 .get_resync = scsih_get_resync,
11701 .get_state = scsih_get_state,
11705 * _scsih_determine_hba_mpi_version - determine in which MPI version class
11706 * this device belongs to.
11707 * @pdev: PCI device struct
11709 * return MPI2_VERSION for SAS 2.0 HBA devices,
11710 * MPI25_VERSION for SAS 3.0 HBA devices, and
11711 * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
11714 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
/* Classify purely by PCI device ID; vendor ID is matched elsewhere
 * (see mpt3sas_pci_table). */
11717 switch (pdev->device) {
11718 case MPI2_MFGPAGE_DEVID_SSS6200:
11719 case MPI2_MFGPAGE_DEVID_SAS2004:
11720 case MPI2_MFGPAGE_DEVID_SAS2008:
11721 case MPI2_MFGPAGE_DEVID_SAS2108_1:
11722 case MPI2_MFGPAGE_DEVID_SAS2108_2:
11723 case MPI2_MFGPAGE_DEVID_SAS2108_3:
11724 case MPI2_MFGPAGE_DEVID_SAS2116_1:
11725 case MPI2_MFGPAGE_DEVID_SAS2116_2:
11726 case MPI2_MFGPAGE_DEVID_SAS2208_1:
11727 case MPI2_MFGPAGE_DEVID_SAS2208_2:
11728 case MPI2_MFGPAGE_DEVID_SAS2208_3:
11729 case MPI2_MFGPAGE_DEVID_SAS2208_4:
11730 case MPI2_MFGPAGE_DEVID_SAS2208_5:
11731 case MPI2_MFGPAGE_DEVID_SAS2208_6:
11732 case MPI2_MFGPAGE_DEVID_SAS2308_1:
11733 case MPI2_MFGPAGE_DEVID_SAS2308_2:
11734 case MPI2_MFGPAGE_DEVID_SAS2308_3:
11735 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11736 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11737 return MPI2_VERSION;
/* SAS 3.0 (Fury/Invader 30xx family). */
11738 case MPI25_MFGPAGE_DEVID_SAS3004:
11739 case MPI25_MFGPAGE_DEVID_SAS3008:
11740 case MPI25_MFGPAGE_DEVID_SAS3108_1:
11741 case MPI25_MFGPAGE_DEVID_SAS3108_2:
11742 case MPI25_MFGPAGE_DEVID_SAS3108_5:
11743 case MPI25_MFGPAGE_DEVID_SAS3108_6:
11744 return MPI25_VERSION;
/* MPI 2.6 parts, including the deliberately-listed invalid/tampered
 * security IDs which are diagnosed later in _scsih_probe(). */
11745 case MPI26_MFGPAGE_DEVID_SAS3216:
11746 case MPI26_MFGPAGE_DEVID_SAS3224:
11747 case MPI26_MFGPAGE_DEVID_SAS3316_1:
11748 case MPI26_MFGPAGE_DEVID_SAS3316_2:
11749 case MPI26_MFGPAGE_DEVID_SAS3316_3:
11750 case MPI26_MFGPAGE_DEVID_SAS3316_4:
11751 case MPI26_MFGPAGE_DEVID_SAS3324_1:
11752 case MPI26_MFGPAGE_DEVID_SAS3324_2:
11753 case MPI26_MFGPAGE_DEVID_SAS3324_3:
11754 case MPI26_MFGPAGE_DEVID_SAS3324_4:
11755 case MPI26_MFGPAGE_DEVID_SAS3508:
11756 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11757 case MPI26_MFGPAGE_DEVID_SAS3408:
11758 case MPI26_MFGPAGE_DEVID_SAS3516:
11759 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11760 case MPI26_MFGPAGE_DEVID_SAS3416:
11761 case MPI26_MFGPAGE_DEVID_SAS3616:
11762 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11763 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11764 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11765 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11766 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11767 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11768 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11769 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11770 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11771 return MPI26_VERSION;
11777 * _scsih_probe - attach and add scsi host
11778 * @pdev: PCI device struct
11779 * @id: pci device id
11781 * Return: 0 success, anything else error.
11784 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
11786 struct MPT3SAS_ADAPTER *ioc;
11787 struct Scsi_Host *shost = NULL;
11789 u16 hba_mpi_version;
11791 /* Determine in which MPI version class this pci device belongs */
11792 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
11793 if (hba_mpi_version == 0)
11796 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
11797 * for other generation HBA's return with -ENODEV
11799 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
11802 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
11803 * for other generation HBA's return with -ENODEV
11805 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
11806 || hba_mpi_version == MPI26_VERSION)))
/* Allocate the Scsi_Host from the template matching the HBA generation
 * and seed the per-adapter MPT3SAS_ADAPTER structure. */
11809 switch (hba_mpi_version) {
/* SAS 2.0: disable ASPM link states, which these parts do not handle. */
11811 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
11812 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
11813 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
11814 shost = scsi_host_alloc(&mpt2sas_driver_template,
11815 sizeof(struct MPT3SAS_ADAPTER));
11818 ioc = shost_priv(shost);
11819 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11820 ioc->hba_mpi_version_belonged = hba_mpi_version;
11821 ioc->id = mpt2_ids++;
11822 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
11823 switch (pdev->device) {
11824 case MPI2_MFGPAGE_DEVID_SSS6200:
11825 ioc->is_warpdrive = 1;
11826 ioc->hide_ir_msg = 1;
11828 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11829 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11830 ioc->is_mcpu_endpoint = 1;
11833 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
/* SAS 2.0 defaults to multipath off unless explicitly requested. */
11837 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11838 ioc->multipath_on_hba = 0;
11840 ioc->multipath_on_hba = 1;
11843 case MPI25_VERSION:
11844 case MPI26_VERSION:
11845 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
11846 shost = scsi_host_alloc(&mpt3sas_driver_template,
11847 sizeof(struct MPT3SAS_ADAPTER));
11850 ioc = shost_priv(shost);
11851 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11852 ioc->hba_mpi_version_belonged = hba_mpi_version;
11853 ioc->id = mpt3_ids++;
11854 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
/* Flag the gen3.5 (Ventura/Aero/Sea) parts and diagnose security-mode
 * device IDs reported by the firmware. */
11855 switch (pdev->device) {
11856 case MPI26_MFGPAGE_DEVID_SAS3508:
11857 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11858 case MPI26_MFGPAGE_DEVID_SAS3408:
11859 case MPI26_MFGPAGE_DEVID_SAS3516:
11860 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11861 case MPI26_MFGPAGE_DEVID_SAS3416:
11862 case MPI26_MFGPAGE_DEVID_SAS3616:
11863 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11864 ioc->is_gen35_ioc = 1;
11866 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11867 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11868 dev_err(&pdev->dev,
11869 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
11870 pdev->device, pdev->subsystem_vendor,
11871 pdev->subsystem_device);
11873 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11874 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11875 dev_err(&pdev->dev,
11876 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
11877 pdev->device, pdev->subsystem_vendor,
11878 pdev->subsystem_device);
11880 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11881 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11882 dev_info(&pdev->dev,
11883 "HBA is in Configurable Secure mode\n");
11885 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11886 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11887 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
11890 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
/* Combined reply queues are used on SAS3 C0+ silicon and all MPI 2.6
 * parts; gen3.5 exposes more reply-post host index registers. */
11892 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
11893 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
11894 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
11895 ioc->combined_reply_queue = 1;
11896 if (ioc->is_gen35_ioc)
11897 ioc->combined_reply_index_count =
11898 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
11900 ioc->combined_reply_index_count =
11901 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
/* Multipath default differs: off for pre-gen3.5, on for gen3.5. */
11904 switch (ioc->is_gen35_ioc) {
11906 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11907 ioc->multipath_on_hba = 0;
11909 ioc->multipath_on_hba = 1;
11912 if (multipath_on_hba == -1 || multipath_on_hba > 0)
11913 ioc->multipath_on_hba = 1;
11915 ioc->multipath_on_hba = 0;
/* Register this adapter on the global IOC list. */
11925 INIT_LIST_HEAD(&ioc->list);
11926 spin_lock(&gioc_lock);
11927 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
11928 spin_unlock(&gioc_lock);
11929 ioc->shost = shost;
/* Hand the pre-registered callback indexes to this adapter instance
 * (registered once in scsih_init()). */
11931 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
11932 ioc->tm_cb_idx = tm_cb_idx;
11933 ioc->ctl_cb_idx = ctl_cb_idx;
11934 ioc->base_cb_idx = base_cb_idx;
11935 ioc->port_enable_cb_idx = port_enable_cb_idx;
11936 ioc->transport_cb_idx = transport_cb_idx;
11937 ioc->scsih_cb_idx = scsih_cb_idx;
11938 ioc->config_cb_idx = config_cb_idx;
11939 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
11940 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
11941 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
11942 ioc->logging_level = logging_level;
11943 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
11944 /* Host waits for minimum of six seconds */
11945 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
11947 * Enable MEMORY MOVE support flag.
11949 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
11950 /* Enable ADDITIONAL QUERY support flag. */
11951 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
11953 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
11955 /* misc semaphores and spin locks */
11956 mutex_init(&ioc->reset_in_progress_mutex);
11957 /* initializing pci_access_mutex lock */
11958 mutex_init(&ioc->pci_access_mutex);
11959 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
11960 spin_lock_init(&ioc->scsi_lookup_lock);
11961 spin_lock_init(&ioc->sas_device_lock);
11962 spin_lock_init(&ioc->sas_node_lock);
11963 spin_lock_init(&ioc->fw_event_lock);
11964 spin_lock_init(&ioc->raid_device_lock);
11965 spin_lock_init(&ioc->pcie_device_lock);
11966 spin_lock_init(&ioc->diag_trigger_lock);
/* Per-adapter device/event bookkeeping lists. */
11968 INIT_LIST_HEAD(&ioc->sas_device_list);
11969 INIT_LIST_HEAD(&ioc->sas_device_init_list);
11970 INIT_LIST_HEAD(&ioc->sas_expander_list);
11971 INIT_LIST_HEAD(&ioc->enclosure_list);
11972 INIT_LIST_HEAD(&ioc->pcie_device_list);
11973 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
11974 INIT_LIST_HEAD(&ioc->fw_event_list);
11975 INIT_LIST_HEAD(&ioc->raid_device_list);
11976 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
11977 INIT_LIST_HEAD(&ioc->delayed_tr_list);
11978 INIT_LIST_HEAD(&ioc->delayed_sc_list);
11979 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
11980 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
11981 INIT_LIST_HEAD(&ioc->reply_queue_list);
11982 INIT_LIST_HEAD(&ioc->port_table_list);
11984 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
11986 /* init shost parameters */
11987 shost->max_cmd_len = 32;
11988 shost->max_lun = max_lun;
11989 shost->transportt = mpt3sas_transport_template;
11990 shost->unique_id = ioc->id;
11992 if (ioc->is_mcpu_endpoint) {
11993 /* mCPU MPI support 64K max IO */
11994 shost->max_sectors = 128;
11995 ioc_info(ioc, "The max_sectors value is set to %d\n",
11996 shost->max_sectors);
/* Clamp the user-supplied max_sectors module parameter to [64, 32767]
 * and round down to an even value. */
11998 if (max_sectors != 0xFFFF) {
11999 if (max_sectors < 64) {
12000 shost->max_sectors = 64;
12001 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12003 } else if (max_sectors > 32767) {
12004 shost->max_sectors = 32767;
12005 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12008 shost->max_sectors = max_sectors & 0xFFFE;
12009 ioc_info(ioc, "The max_sectors value is set to %d\n",
12010 shost->max_sectors);
12014 /* register EEDP capabilities with SCSI layer */
12015 if (prot_mask >= 0)
12016 scsi_host_set_prot(shost, (prot_mask & 0x07));
12018 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12019 | SHOST_DIF_TYPE2_PROTECTION
12020 | SHOST_DIF_TYPE3_PROTECTION);
12022 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* Ordered workqueue serializes firmware event processing per adapter. */
12025 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12026 "fw_event_%s%d", ioc->driver_name, ioc->id);
12027 ioc->firmware_event_thread = alloc_ordered_workqueue(
12028 ioc->firmware_event_name, 0);
12029 if (!ioc->firmware_event_thread) {
12030 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12031 __FILE__, __LINE__, __func__);
12033 goto out_thread_fail;
12036 ioc->is_driver_loading = 1;
12037 if ((mpt3sas_base_attach(ioc))) {
12038 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12039 __FILE__, __LINE__, __func__);
12041 goto out_attach_fail;
/* WarpDrive: decide whether member disks are hidden based on the
 * manufacturing page 10 flag and presence of volumes. */
12044 if (ioc->is_warpdrive) {
12045 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12046 ioc->hide_drives = 0;
12047 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12048 ioc->hide_drives = 1;
12050 if (mpt3sas_get_num_volumes(ioc))
12051 ioc->hide_drives = 1;
12053 ioc->hide_drives = 0;
12056 ioc->hide_drives = 0;
/* Enable host_tagset (shared tags across hw queues) only for gen3.5
 * IOCs with multiple reply queues and IRQ affinity enabled. */
12058 shost->host_tagset = 0;
12059 shost->nr_hw_queues = 1;
12061 if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
12062 host_tagset_enable && ioc->smp_affinity_enable) {
12064 shost->host_tagset = 1;
12065 shost->nr_hw_queues =
12066 ioc->reply_queue_count - ioc->high_iops_queues;
12068 dev_info(&ioc->pdev->dev,
12069 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12070 shost->can_queue, shost->nr_hw_queues);
12073 rv = scsi_add_host(shost, &pdev->dev);
12075 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12076 __FILE__, __LINE__, __func__);
12077 goto out_add_shost_fail;
12080 scsi_scan_host(shost);
12081 mpt3sas_setup_debugfs(ioc);
/* Error unwind: release resources in reverse order of acquisition. */
12083 out_add_shost_fail:
12084 mpt3sas_base_detach(ioc);
12086 destroy_workqueue(ioc->firmware_event_thread);
12088 spin_lock(&gioc_lock);
12089 list_del(&ioc->list);
12090 spin_unlock(&gioc_lock);
12091 scsi_host_put(shost);
12096 * scsih_suspend - power management suspend main entry point
12097 * @dev: Device struct
12099 * Return: 0 success, anything else error.
12101 static int __maybe_unused
12102 scsih_suspend(struct device *dev)
12104 struct pci_dev *pdev = to_pci_dev(dev);
12105 struct Scsi_Host *shost;
12106 struct MPT3SAS_ADAPTER *ioc;
12109 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
/* Quiesce the adapter: stop the watchdog, drain queued work, block new
 * SCSI requests, then shut down NVMe drives before freeing resources. */
12113 mpt3sas_base_stop_watchdog(ioc);
12114 flush_scheduled_work();
12115 scsi_block_requests(shost);
12116 _scsih_nvme_shutdown(ioc);
12117 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12118 pdev, pci_name(pdev));
12120 mpt3sas_base_free_resources(ioc);
12125 * scsih_resume - power management resume main entry point
12126 * @dev: Device struct
12128 * Return: 0 success, anything else error.
12130 static int __maybe_unused
12131 scsih_resume(struct device *dev)
12133 struct pci_dev *pdev = to_pci_dev(dev);
12134 struct Scsi_Host *shost;
12135 struct MPT3SAS_ADAPTER *ioc;
12136 pci_power_t device_state = pdev->current_state;
12139 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12143 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12144 pdev, pci_name(pdev), device_state);
/* Re-map BARs/IRQs, reset the IOC, then resume I/O and the watchdog. */
12147 r = mpt3sas_base_map_resources(ioc);
12150 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12151 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12152 scsi_unblock_requests(shost);
12153 mpt3sas_base_start_watchdog(ioc);
12158 * scsih_pci_error_detected - Called when a PCI error is detected.
12159 * @pdev: PCI device struct
12160 * @state: PCI channel state
12162 * Description: Called when a PCI error is detected.
12164 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12166 static pci_ers_result_t
12167 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12169 struct Scsi_Host *shost;
12170 struct MPT3SAS_ADAPTER *ioc;
12172 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12173 return PCI_ERS_RESULT_DISCONNECT;
12175 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12178 case pci_channel_io_normal:
12179 return PCI_ERS_RESULT_CAN_RECOVER;
12180 case pci_channel_io_frozen:
12181 /* Fatal error, prepare for slot reset */
12182 ioc->pci_error_recovery = 1;
12183 scsi_block_requests(ioc->shost);
12184 mpt3sas_base_stop_watchdog(ioc);
12185 mpt3sas_base_free_resources(ioc);
12186 return PCI_ERS_RESULT_NEED_RESET;
12187 case pci_channel_io_perm_failure:
12188 /* Permanent error, prepare for device removal */
12189 ioc->pci_error_recovery = 1;
12190 mpt3sas_base_stop_watchdog(ioc);
/* Fail all outstanding commands so the midlayer can retry/remove. */
12191 _scsih_flush_running_cmds(ioc);
12192 return PCI_ERS_RESULT_DISCONNECT;
12194 return PCI_ERS_RESULT_NEED_RESET;
12198 * scsih_pci_slot_reset - Called when PCI slot has been reset.
12199 * @pdev: PCI device struct
12201 * Description: This routine is called by the pci error recovery
12202 * code after the PCI slot has been reset, just before we
12203 * should resume normal operations.
12205 static pci_ers_result_t
12206 scsih_pci_slot_reset(struct pci_dev *pdev)
12208 struct Scsi_Host *shost;
12209 struct MPT3SAS_ADAPTER *ioc;
12212 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12213 return PCI_ERS_RESULT_DISCONNECT;
12215 ioc_info(ioc, "PCI error: slot reset callback!!\n");
/* Leave error-recovery mode, restore PCI config space saved before the
 * reset, re-map resources, then hard-reset the IOC. */
12217 ioc->pci_error_recovery = 0;
12219 pci_restore_state(pdev);
12220 rc = mpt3sas_base_map_resources(ioc);
12222 return PCI_ERS_RESULT_DISCONNECT;
12224 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12225 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12227 ioc_warn(ioc, "hard reset: %s\n",
12228 (rc == 0) ? "success" : "failed");
12231 return PCI_ERS_RESULT_RECOVERED;
12233 return PCI_ERS_RESULT_DISCONNECT;
12237 * scsih_pci_resume() - resume normal ops after PCI reset
12238 * @pdev: pointer to PCI device
12240 * Called when the error recovery driver tells us that its
12241 * OK to resume normal operation. Use completion to allow
12242 * halted scsi ops to resume.
12245 scsih_pci_resume(struct pci_dev *pdev)
12247 struct Scsi_Host *shost;
12248 struct MPT3SAS_ADAPTER *ioc;
12250 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12253 ioc_info(ioc, "PCI error: resume callback!!\n");
/* Restart the watchdog and let queued SCSI requests flow again. */
12255 mpt3sas_base_start_watchdog(ioc);
12256 scsi_unblock_requests(ioc->shost);
12260 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12261 * @pdev: pointer to PCI device
12263 static pci_ers_result_t
12264 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12266 struct Scsi_Host *shost;
12267 struct MPT3SAS_ADAPTER *ioc;
12269 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12270 return PCI_ERS_RESULT_DISCONNECT;
12272 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12274 /* TODO - dump whatever for debugging purposes */
12276 /* This called only if scsih_pci_error_detected returns
12277 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12278 * works, no need to reset slot.
12280 return PCI_ERS_RESULT_RECOVERED;
12284 * scsih_ncq_prio_supp - Check for NCQ command priority support
12285 * @sdev: scsi device struct
12287 * This is called when a user indicates they would like to enable
12288 * ncq command priorities. This works only on SATA devices.
12290 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12292 unsigned char *buf;
12293 bool ncq_prio_supp = false;
12295 if (!scsi_device_supports_vpd(sdev))
12296 return ncq_prio_supp;
12298 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12300 return ncq_prio_supp;
/* VPD page 0x89 is the ATA Information page; byte 213 bit 4 of the
 * embedded IDENTIFY data advertises NCQ priority support. */
12302 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12303 ncq_prio_supp = (buf[213] >> 4) & 1;
12306 return ncq_prio_supp;
12309 * The pci device ids are defined in mpi/mpi2_cnfg.h.
12311 static const struct pci_device_id mpt3sas_pci_table[] = {
12312 /* Spitfire ~ 2004 */
12313 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12314 PCI_ANY_ID, PCI_ANY_ID },
12315 /* Falcon ~ 2008 */
12316 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12317 PCI_ANY_ID, PCI_ANY_ID },
12318 /* Liberator ~ 2108 */
12319 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12320 PCI_ANY_ID, PCI_ANY_ID },
12321 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12322 PCI_ANY_ID, PCI_ANY_ID },
12323 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12324 PCI_ANY_ID, PCI_ANY_ID },
12325 /* Meteor ~ 2116 */
12326 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12327 PCI_ANY_ID, PCI_ANY_ID },
12328 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12329 PCI_ANY_ID, PCI_ANY_ID },
12330 /* Thunderbolt ~ 2208 */
12331 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12332 PCI_ANY_ID, PCI_ANY_ID },
12333 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12334 PCI_ANY_ID, PCI_ANY_ID },
12335 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12336 PCI_ANY_ID, PCI_ANY_ID },
12337 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12338 PCI_ANY_ID, PCI_ANY_ID },
12339 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12340 PCI_ANY_ID, PCI_ANY_ID },
12341 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12342 PCI_ANY_ID, PCI_ANY_ID },
12343 /* Mustang ~ 2308 */
12344 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12345 PCI_ANY_ID, PCI_ANY_ID },
12346 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12347 PCI_ANY_ID, PCI_ANY_ID },
12348 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12349 PCI_ANY_ID, PCI_ANY_ID },
12350 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12351 PCI_ANY_ID, PCI_ANY_ID },
12352 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12353 PCI_ANY_ID, PCI_ANY_ID },
/* SSS6200 is the WarpDrive solid-state storage card. */
12355 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12356 PCI_ANY_ID, PCI_ANY_ID },
12357 /* Fury ~ 3004 and 3008 */
12358 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12359 PCI_ANY_ID, PCI_ANY_ID },
12360 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12361 PCI_ANY_ID, PCI_ANY_ID },
12362 /* Invader ~ 3108 */
12363 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12364 PCI_ANY_ID, PCI_ANY_ID },
12365 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12366 PCI_ANY_ID, PCI_ANY_ID },
12367 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12368 PCI_ANY_ID, PCI_ANY_ID },
12369 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12370 PCI_ANY_ID, PCI_ANY_ID },
12371 /* Cutlass ~ 3216 and 3224 */
12372 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12373 PCI_ANY_ID, PCI_ANY_ID },
12374 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12375 PCI_ANY_ID, PCI_ANY_ID },
12376 /* Intruder ~ 3316 and 3324 */
12377 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12378 PCI_ANY_ID, PCI_ANY_ID },
12379 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12380 PCI_ANY_ID, PCI_ANY_ID },
12381 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12382 PCI_ANY_ID, PCI_ANY_ID },
12383 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12384 PCI_ANY_ID, PCI_ANY_ID },
12385 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12386 PCI_ANY_ID, PCI_ANY_ID },
12387 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12388 PCI_ANY_ID, PCI_ANY_ID },
12389 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12390 PCI_ANY_ID, PCI_ANY_ID },
12391 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12392 PCI_ANY_ID, PCI_ANY_ID },
12393 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12394 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12395 PCI_ANY_ID, PCI_ANY_ID },
12396 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12397 PCI_ANY_ID, PCI_ANY_ID },
12398 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12399 PCI_ANY_ID, PCI_ANY_ID },
12400 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12401 PCI_ANY_ID, PCI_ANY_ID },
12402 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12403 PCI_ANY_ID, PCI_ANY_ID },
12404 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12405 PCI_ANY_ID, PCI_ANY_ID },
12406 /* Mercator ~ 3616*/
12407 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12408 PCI_ANY_ID, PCI_ANY_ID },
12410 /* Aero SI 0x00E1 Configurable Secure
12411 * 0x00E2 Hard Secure
12413 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12414 PCI_ANY_ID, PCI_ANY_ID },
12415 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12416 PCI_ANY_ID, PCI_ANY_ID },
/* Invalid/tampered IDs are matched on purpose so _scsih_probe() can
 * report the security failure instead of silently ignoring the HBA. */
12419 * Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered
12421 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12422 PCI_ANY_ID, PCI_ANY_ID },
12423 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12424 PCI_ANY_ID, PCI_ANY_ID },
12426 /* Atlas PCIe Switch Management Port */
12427 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12428 PCI_ANY_ID, PCI_ANY_ID },
12430 /* Sea SI 0x00E5 Configurable Secure
12431 * 0x00E6 Hard Secure
12433 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12434 PCI_ANY_ID, PCI_ANY_ID },
12435 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12436 PCI_ANY_ID, PCI_ANY_ID },
12439 * Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
12441 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12442 PCI_ANY_ID, PCI_ANY_ID },
12443 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12444 PCI_ANY_ID, PCI_ANY_ID },
12446 {0} /* Terminating entry */
12448 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
/* PCI AER error-recovery callbacks (see Documentation/PCI/pci-error-recovery). */
12450 static struct pci_error_handlers _mpt3sas_err_handler = {
12451 .error_detected = scsih_pci_error_detected,
12452 .mmio_enabled = scsih_pci_mmio_enabled,
12453 .slot_reset = scsih_pci_slot_reset,
12454 .resume = scsih_pci_resume,
/* System suspend/resume hooks for the PCI driver's dev_pm_ops. */
12457 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
/* PCI driver definition binding the device table to probe/remove/PM/AER. */
12459 static struct pci_driver mpt3sas_driver = {
12460 .name = MPT3SAS_DRIVER_NAME,
12461 .id_table = mpt3sas_pci_table,
12462 .probe = _scsih_probe,
12463 .remove = scsih_remove,
12464 .shutdown = scsih_shutdown,
12465 .err_handler = &_mpt3sas_err_handler,
12466 .driver.pm = &scsih_pm_ops,
12470 * scsih_init - main entry point for this driver.
12472 * Return: 0 success, anything else error.
/* Registers every MPT completion-callback handler once; the returned
 * indexes are copied into each adapter in _scsih_probe(). */
12480 mpt3sas_base_initialize_callback_handler();
12482 /* queuecommand callback handler */
12483 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12485 /* task management callback handler */
12486 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12488 /* base internal commands callback handler */
12489 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12490 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12491 mpt3sas_port_enable_done);
12493 /* transport internal commands callback handler */
12494 transport_cb_idx = mpt3sas_base_register_callback_handler(
12495 mpt3sas_transport_done);
12497 /* scsih internal commands callback handler */
12498 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12500 /* configuration page API internal commands callback handler */
12501 config_cb_idx = mpt3sas_base_register_callback_handler(
12502 mpt3sas_config_done);
12504 /* ctl module callback handler */
12505 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
/* Target-reset / volume target-reset / SAS IO unit control completions. */
12507 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12508 _scsih_tm_tr_complete);
12510 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12511 _scsih_tm_volume_tr_complete);
12513 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12514 _scsih_sas_control_complete);
12516 mpt3sas_init_debugfs();
12521 * scsih_exit - exit point for this driver (when it is a module).
12523 * Return: 0 success, anything else error.
/* Release every callback index registered in scsih_init(). */
12529 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12530 mpt3sas_base_release_callback_handler(tm_cb_idx);
12531 mpt3sas_base_release_callback_handler(base_cb_idx);
12532 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12533 mpt3sas_base_release_callback_handler(transport_cb_idx);
12534 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12535 mpt3sas_base_release_callback_handler(config_cb_idx);
12536 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12538 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12539 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12540 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12542 /* raid transport support */
/* Release only the raid templates that were attached in _mpt3sas_init()
 * (mpt3sas unless enumerating SAS2-only, mpt2sas unless SAS3-only). */
12543 if (hbas_to_enumerate != 1)
12544 raid_class_release(mpt3sas_raid_template);
12545 if (hbas_to_enumerate != 2)
12546 raid_class_release(mpt2sas_raid_template);
12547 sas_release_transport(mpt3sas_transport_template);
12548 mpt3sas_exit_debugfs();
12552 * _mpt3sas_init - main entry point for this driver.
12554 * Return: 0 success, anything else error.
12557 _mpt3sas_init(void)
12561 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12562 MPT3SAS_DRIVER_VERSION);
/* Attach the SAS transport class shared by both HBA generations. */
12564 mpt3sas_transport_template =
12565 sas_attach_transport(&mpt3sas_transport_functions);
12566 if (!mpt3sas_transport_template)
12569 /* No need attach mpt3sas raid functions template
12570 * if hbas_to_enumerate value is one.
12572 if (hbas_to_enumerate != 1) {
12573 mpt3sas_raid_template =
12574 raid_class_attach(&mpt3sas_raid_functions);
12575 if (!mpt3sas_raid_template) {
12576 sas_release_transport(mpt3sas_transport_template);
12581 /* No need to attach mpt2sas raid functions template
12582 * if hbas_to_enumerate value is two
12584 if (hbas_to_enumerate != 2) {
12585 mpt2sas_raid_template =
12586 raid_class_attach(&mpt2sas_raid_functions);
12587 if (!mpt2sas_raid_template) {
12588 sas_release_transport(mpt3sas_transport_template);
12593 error = scsih_init();
/* Initialize the ioctl/ctl interface, then register the PCI driver,
 * which triggers _scsih_probe() for each matching HBA. */
12599 mpt3sas_ctl_init(hbas_to_enumerate);
12601 error = pci_register_driver(&mpt3sas_driver);
12609 * _mpt3sas_exit - exit point for this driver (when it is a module).
12613 _mpt3sas_exit(void)
12615 pr_info("mpt3sas version %s unloading\n",
12616 MPT3SAS_DRIVER_VERSION);
/* Tear down the ctl interface before unregistering the PCI driver so
 * no new ioctl users can race with adapter removal. */
12618 mpt3sas_ctl_exit(hbas_to_enumerate);
12620 pci_unregister_driver(&mpt3sas_driver);
12625 module_init(_mpt3sas_init);
12626 module_exit(_mpt3sas_exit);