2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <linux/blk-mq-pci.h>
58 #include <asm/unaligned.h>
60 #include "mpt3sas_base.h"
62 #define RAID_CHANNEL 1
64 #define PCIE_CHANNEL 2
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
105 static u8 tm_tr_cb_idx = -1 ;
106 static u8 tm_tr_volume_cb_idx = -1 ;
107 static u8 tm_sas_control_cb_idx = -1;
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 " bits for enabling additional logging info (default=0)");
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0444);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0444);
122 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
124 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0444);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0444);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 1 - enumerates only SAS 2.0 generation HBAs\n \
135 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
137 /* diag_buffer_enable is bitwise
139 * bit 1 set = SNAPSHOT
140 * bit 2 set = EXTENDED
142 * Either bit can be set, or both
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0444);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0444);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0444);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
158 static bool enable_sdev_max_qd;
159 module_param(enable_sdev_max_qd, bool, 0444);
160 MODULE_PARM_DESC(enable_sdev_max_qd,
161 "Enable sdev max qd as can_queue, def=disabled(0)");
163 static int multipath_on_hba = -1;
164 module_param(multipath_on_hba, int, 0);
165 MODULE_PARM_DESC(multipath_on_hba,
166 "Multipath support to add same target device\n\t\t"
167 "as many times as it is visible to HBA from various paths\n\t\t"
169 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
170 "\t SAS 3.5 HBA - This will be enabled)");
172 static int host_tagset_enable = 1;
173 module_param(host_tagset_enable, int, 0444);
174 MODULE_PARM_DESC(host_tagset_enable,
175 "Shared host tagset enable/disable Default: enable(1)");
177 /* raid transport support */
178 static struct raid_template *mpt3sas_raid_template;
179 static struct raid_template *mpt2sas_raid_template;
183 * struct sense_info - common structure for obtaining sense keys
185 * @asc: additional sense code
186 * @ascq: additional sense code qualifier
194 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
195 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
196 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
197 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
198 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
200 * struct fw_event_work - firmware event struct
201 * @list: link list framework
202 * @work: work object (ioc->fault_reset_work_q)
203 * @ioc: per adapter object
204 * @device_handle: device handle
205 * @VF_ID: virtual function id
206 * @VP_ID: virtual port id
207 * @ignore: flag meaning this event has been marked to ignore
208 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
209 * @refcount: kref for this event
210 * @event_data: reply event data payload follows
212 * This object stored on ioc->fw_event_list.
214 struct fw_event_work {
215 struct list_head list;
216 struct work_struct work;
218 struct MPT3SAS_ADAPTER *ioc;
224 struct kref refcount;
225 char event_data[] __aligned(4);
228 static void fw_event_work_free(struct kref *r)
230 kfree(container_of(r, struct fw_event_work, refcount));
233 static void fw_event_work_get(struct fw_event_work *fw_work)
235 kref_get(&fw_work->refcount);
238 static void fw_event_work_put(struct fw_event_work *fw_work)
240 kref_put(&fw_work->refcount, fw_event_work_free);
243 static struct fw_event_work *alloc_fw_event_work(int len)
245 struct fw_event_work *fw_event;
247 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
251 kref_init(&fw_event->refcount);
256 * struct _scsi_io_transfer - scsi io transfer
257 * @handle: sas device handle (assigned by firmware)
258 * @is_raid: flag set for hidden raid components
259 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
260 * @data_length: data transfer length
261 * @data_dma: dma pointer to data
264 * @cdb_length: cdb length
266 * @timeout: timeout for this command
267 * @VF_ID: virtual function id
268 * @VP_ID: virtual port id
269 * @valid_reply: flag set for reply message
270 * @sense_length: sense length
271 * @ioc_status: ioc status
272 * @scsi_state: scsi state
273 * @scsi_status: scsi staus
274 * @log_info: log information
275 * @transfer_length: data length transfer when there is a reply message
277 * Used for sending internal scsi commands to devices within this module.
278 * Refer to _scsi_send_scsi_io().
280 struct _scsi_io_transfer {
283 enum dma_data_direction dir;
286 u8 sense[SCSI_SENSE_BUFFERSIZE];
294 /* the following bits are only valid when 'valid_reply = 1' */
304 * _scsih_set_debug_level - global setting of ioc->logging_level.
308 * Note: The logging levels are defined in mpt3sas_debug.h.
311 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
313 int ret = param_set_int(val, kp);
314 struct MPT3SAS_ADAPTER *ioc;
319 pr_info("setting logging_level(0x%08x)\n", logging_level);
320 spin_lock(&gioc_lock);
321 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
322 ioc->logging_level = logging_level;
323 spin_unlock(&gioc_lock);
326 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
327 &logging_level, 0644);
330 * _scsih_srch_boot_sas_address - search based on sas_address
331 * @sas_address: sas address
332 * @boot_device: boot device object from bios page 2
334 * Return: 1 when there's a match, 0 means no match.
337 _scsih_srch_boot_sas_address(u64 sas_address,
338 Mpi2BootDeviceSasWwid_t *boot_device)
340 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
344 * _scsih_srch_boot_device_name - search based on device name
345 * @device_name: device name specified in INDENTIFY fram
346 * @boot_device: boot device object from bios page 2
348 * Return: 1 when there's a match, 0 means no match.
351 _scsih_srch_boot_device_name(u64 device_name,
352 Mpi2BootDeviceDeviceName_t *boot_device)
354 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
358 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
359 * @enclosure_logical_id: enclosure logical id
360 * @slot_number: slot number
361 * @boot_device: boot device object from bios page 2
363 * Return: 1 when there's a match, 0 means no match.
366 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
367 Mpi2BootDeviceEnclosureSlot_t *boot_device)
369 return (enclosure_logical_id == le64_to_cpu(boot_device->
370 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
371 SlotNumber)) ? 1 : 0;
375 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
376 * port number from port list
377 * @ioc: per adapter object
378 * @port_id: port number
379 * @bypass_dirty_port_flag: when set look the matching hba port entry even
380 * if hba port entry is marked as dirty.
382 * Search for hba port entry corresponding to provided port number,
383 * if available return port object otherwise return NULL.
386 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
387 u8 port_id, u8 bypass_dirty_port_flag)
389 struct hba_port *port, *port_next;
392 * When multipath_on_hba is disabled then
393 * search the hba_port entry using default
396 if (!ioc->multipath_on_hba)
397 port_id = MULTIPATH_DISABLED_PORT_ID;
399 list_for_each_entry_safe(port, port_next,
400 &ioc->port_table_list, list) {
401 if (port->port_id != port_id)
403 if (bypass_dirty_port_flag)
405 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
411 * Allocate hba_port object for default port id (i.e. 255)
412 * when multipath_on_hba is disabled for the HBA.
413 * And add this object to port_table_list.
415 if (!ioc->multipath_on_hba) {
416 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
420 port->port_id = port_id;
422 "hba_port entry: %p, port: %d is added to hba_port list\n",
423 port, port->port_id);
424 list_add_tail(&port->list,
425 &ioc->port_table_list);
432 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
433 * @ioc: per adapter object
434 * @port: hba_port object
437 * Return virtual_phy object corresponding to phy number.
440 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
441 struct hba_port *port, u32 phy)
443 struct virtual_phy *vphy, *vphy_next;
445 if (!port->vphys_mask)
448 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
449 if (vphy->phy_mask & (1 << phy))
456 * _scsih_is_boot_device - search for matching boot device.
457 * @sas_address: sas address
458 * @device_name: device name specified in INDENTIFY fram
459 * @enclosure_logical_id: enclosure logical id
461 * @form: specifies boot device form
462 * @boot_device: boot device object from bios page 2
464 * Return: 1 when there's a match, 0 means no match.
467 _scsih_is_boot_device(u64 sas_address, u64 device_name,
468 u64 enclosure_logical_id, u16 slot, u8 form,
469 Mpi2BiosPage2BootDevice_t *boot_device)
474 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
477 rc = _scsih_srch_boot_sas_address(
478 sas_address, &boot_device->SasWwid);
480 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
481 if (!enclosure_logical_id)
483 rc = _scsih_srch_boot_encl_slot(
484 enclosure_logical_id,
485 slot, &boot_device->EnclosureSlot);
487 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
490 rc = _scsih_srch_boot_device_name(
491 device_name, &boot_device->DeviceName);
493 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
501 * _scsih_get_sas_address - set the sas_address for given device handle
503 * @handle: device handle
504 * @sas_address: sas address
506 * Return: 0 success, non-zero when failure
509 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
512 Mpi2SasDevicePage0_t sas_device_pg0;
513 Mpi2ConfigReply_t mpi_reply;
518 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
519 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
520 ioc_err(ioc, "failure at %s:%d/%s()!\n",
521 __FILE__, __LINE__, __func__);
525 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
526 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
527 /* For HBA, vSES doesn't return HBA SAS address. Instead return
528 * vSES's sas address.
530 if ((handle <= ioc->sas_hba.num_phys) &&
531 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
532 MPI2_SAS_DEVICE_INFO_SEP)))
533 *sas_address = ioc->sas_hba.sas_address;
535 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
539 /* we hit this because the given parent handle doesn't exist */
540 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
543 /* else error case */
544 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
545 handle, ioc_status, __FILE__, __LINE__, __func__);
550 * _scsih_determine_boot_device - determine boot device.
551 * @ioc: per adapter object
552 * @device: sas_device or pcie_device object
553 * @channel: SAS or PCIe channel
555 * Determines whether this device should be first reported device to
556 * to scsi-ml or sas transport, this purpose is for persistent boot device.
557 * There are primary, alternate, and current entries in bios page 2. The order
558 * priority is primary, alternate, then current. This routine saves
559 * the corresponding device object.
560 * The saved data to be used later in _scsih_probe_boot_devices().
563 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
566 struct _sas_device *sas_device;
567 struct _pcie_device *pcie_device;
568 struct _raid_device *raid_device;
571 u64 enclosure_logical_id;
574 /* only process this function when driver loads */
575 if (!ioc->is_driver_loading)
578 /* no Bios, return immediately */
579 if (!ioc->bios_pg3.BiosVersion)
582 if (channel == RAID_CHANNEL) {
583 raid_device = device;
584 sas_address = raid_device->wwid;
586 enclosure_logical_id = 0;
588 } else if (channel == PCIE_CHANNEL) {
589 pcie_device = device;
590 sas_address = pcie_device->wwid;
592 enclosure_logical_id = 0;
596 sas_address = sas_device->sas_address;
597 device_name = sas_device->device_name;
598 enclosure_logical_id = sas_device->enclosure_logical_id;
599 slot = sas_device->slot;
602 if (!ioc->req_boot_device.device) {
603 if (_scsih_is_boot_device(sas_address, device_name,
604 enclosure_logical_id, slot,
605 (ioc->bios_pg2.ReqBootDeviceForm &
606 MPI2_BIOSPAGE2_FORM_MASK),
607 &ioc->bios_pg2.RequestedBootDevice)) {
609 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
610 __func__, (u64)sas_address));
611 ioc->req_boot_device.device = device;
612 ioc->req_boot_device.channel = channel;
616 if (!ioc->req_alt_boot_device.device) {
617 if (_scsih_is_boot_device(sas_address, device_name,
618 enclosure_logical_id, slot,
619 (ioc->bios_pg2.ReqAltBootDeviceForm &
620 MPI2_BIOSPAGE2_FORM_MASK),
621 &ioc->bios_pg2.RequestedAltBootDevice)) {
623 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
624 __func__, (u64)sas_address));
625 ioc->req_alt_boot_device.device = device;
626 ioc->req_alt_boot_device.channel = channel;
630 if (!ioc->current_boot_device.device) {
631 if (_scsih_is_boot_device(sas_address, device_name,
632 enclosure_logical_id, slot,
633 (ioc->bios_pg2.CurrentBootDeviceForm &
634 MPI2_BIOSPAGE2_FORM_MASK),
635 &ioc->bios_pg2.CurrentBootDevice)) {
637 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
638 __func__, (u64)sas_address));
639 ioc->current_boot_device.device = device;
640 ioc->current_boot_device.channel = channel;
645 static struct _sas_device *
646 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
647 struct MPT3SAS_TARGET *tgt_priv)
649 struct _sas_device *ret;
651 assert_spin_locked(&ioc->sas_device_lock);
653 ret = tgt_priv->sas_dev;
660 static struct _sas_device *
661 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
662 struct MPT3SAS_TARGET *tgt_priv)
664 struct _sas_device *ret;
667 spin_lock_irqsave(&ioc->sas_device_lock, flags);
668 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
669 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
674 static struct _pcie_device *
675 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
676 struct MPT3SAS_TARGET *tgt_priv)
678 struct _pcie_device *ret;
680 assert_spin_locked(&ioc->pcie_device_lock);
682 ret = tgt_priv->pcie_dev;
684 pcie_device_get(ret);
690 * mpt3sas_get_pdev_from_target - pcie device search
691 * @ioc: per adapter object
692 * @tgt_priv: starget private object
694 * Context: This function will acquire ioc->pcie_device_lock and will release
695 * before returning the pcie_device object.
697 * This searches for pcie_device from target, then return pcie_device object.
699 static struct _pcie_device *
700 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
701 struct MPT3SAS_TARGET *tgt_priv)
703 struct _pcie_device *ret;
706 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
707 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
708 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
715 * __mpt3sas_get_sdev_by_rphy - sas device search
716 * @ioc: per adapter object
717 * @rphy: sas_rphy pointer
719 * Context: This function will acquire ioc->sas_device_lock and will release
720 * before returning the sas_device object.
722 * This searches for sas_device from rphy object
723 * then return sas_device object.
726 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
727 struct sas_rphy *rphy)
729 struct _sas_device *sas_device;
731 assert_spin_locked(&ioc->sas_device_lock);
733 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
734 if (sas_device->rphy != rphy)
736 sas_device_get(sas_device);
741 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
742 if (sas_device->rphy != rphy)
744 sas_device_get(sas_device);
752 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
753 * sas address from sas_device_list list
754 * @ioc: per adapter object
755 * @sas_address: device sas address
758 * Search for _sas_device object corresponding to provided sas address,
759 * if available return _sas_device object address otherwise return NULL.
762 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
763 u64 sas_address, struct hba_port *port)
765 struct _sas_device *sas_device;
770 assert_spin_locked(&ioc->sas_device_lock);
772 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
773 if (sas_device->sas_address != sas_address)
775 if (sas_device->port != port)
777 sas_device_get(sas_device);
781 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
782 if (sas_device->sas_address != sas_address)
784 if (sas_device->port != port)
786 sas_device_get(sas_device);
794 * mpt3sas_get_sdev_by_addr - sas device search
795 * @ioc: per adapter object
796 * @sas_address: sas address
797 * @port: hba port entry
798 * Context: Calling function should acquire ioc->sas_device_lock
800 * This searches for sas_device based on sas_address & port number,
801 * then return sas_device object.
804 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
805 u64 sas_address, struct hba_port *port)
807 struct _sas_device *sas_device;
810 spin_lock_irqsave(&ioc->sas_device_lock, flags);
811 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
813 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
818 static struct _sas_device *
819 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
821 struct _sas_device *sas_device;
823 assert_spin_locked(&ioc->sas_device_lock);
825 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
826 if (sas_device->handle == handle)
829 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
830 if (sas_device->handle == handle)
836 sas_device_get(sas_device);
841 * mpt3sas_get_sdev_by_handle - sas device search
842 * @ioc: per adapter object
843 * @handle: sas device handle (assigned by firmware)
844 * Context: Calling function should acquire ioc->sas_device_lock
846 * This searches for sas_device based on sas_address, then return sas_device
850 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
852 struct _sas_device *sas_device;
855 spin_lock_irqsave(&ioc->sas_device_lock, flags);
856 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
857 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
863 * _scsih_display_enclosure_chassis_info - display device location info
864 * @ioc: per adapter object
865 * @sas_device: per sas device object
866 * @sdev: scsi device struct
867 * @starget: scsi target struct
870 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
871 struct _sas_device *sas_device, struct scsi_device *sdev,
872 struct scsi_target *starget)
875 if (sas_device->enclosure_handle != 0)
876 sdev_printk(KERN_INFO, sdev,
877 "enclosure logical id (0x%016llx), slot(%d) \n",
879 sas_device->enclosure_logical_id,
881 if (sas_device->connector_name[0] != '\0')
882 sdev_printk(KERN_INFO, sdev,
883 "enclosure level(0x%04x), connector name( %s)\n",
884 sas_device->enclosure_level,
885 sas_device->connector_name);
886 if (sas_device->is_chassis_slot_valid)
887 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
888 sas_device->chassis_slot);
889 } else if (starget) {
890 if (sas_device->enclosure_handle != 0)
891 starget_printk(KERN_INFO, starget,
892 "enclosure logical id(0x%016llx), slot(%d) \n",
894 sas_device->enclosure_logical_id,
896 if (sas_device->connector_name[0] != '\0')
897 starget_printk(KERN_INFO, starget,
898 "enclosure level(0x%04x), connector name( %s)\n",
899 sas_device->enclosure_level,
900 sas_device->connector_name);
901 if (sas_device->is_chassis_slot_valid)
902 starget_printk(KERN_INFO, starget,
903 "chassis slot(0x%04x)\n",
904 sas_device->chassis_slot);
906 if (sas_device->enclosure_handle != 0)
907 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
908 (u64)sas_device->enclosure_logical_id,
910 if (sas_device->connector_name[0] != '\0')
911 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
912 sas_device->enclosure_level,
913 sas_device->connector_name);
914 if (sas_device->is_chassis_slot_valid)
915 ioc_info(ioc, "chassis slot(0x%04x)\n",
916 sas_device->chassis_slot);
921 * _scsih_sas_device_remove - remove sas_device from list.
922 * @ioc: per adapter object
923 * @sas_device: the sas_device object
924 * Context: This function will acquire ioc->sas_device_lock.
926 * If sas_device is on the list, remove it and decrement its reference count.
929 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
930 struct _sas_device *sas_device)
936 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
937 sas_device->handle, (u64)sas_device->sas_address);
939 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
942 * The lock serializes access to the list, but we still need to verify
943 * that nobody removed the entry while we were waiting on the lock.
945 spin_lock_irqsave(&ioc->sas_device_lock, flags);
946 if (!list_empty(&sas_device->list)) {
947 list_del_init(&sas_device->list);
948 sas_device_put(sas_device);
950 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
954 * _scsih_device_remove_by_handle - removing device object by handle
955 * @ioc: per adapter object
956 * @handle: device handle
959 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
961 struct _sas_device *sas_device;
964 if (ioc->shost_recovery)
967 spin_lock_irqsave(&ioc->sas_device_lock, flags);
968 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
970 list_del_init(&sas_device->list);
971 sas_device_put(sas_device);
973 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
975 _scsih_remove_device(ioc, sas_device);
976 sas_device_put(sas_device);
981 * mpt3sas_device_remove_by_sas_address - removing device object by
982 * sas address & port number
983 * @ioc: per adapter object
984 * @sas_address: device sas_address
985 * @port: hba port entry
990 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
991 u64 sas_address, struct hba_port *port)
993 struct _sas_device *sas_device;
996 if (ioc->shost_recovery)
999 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1000 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1002 list_del_init(&sas_device->list);
1003 sas_device_put(sas_device);
1005 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1007 _scsih_remove_device(ioc, sas_device);
1008 sas_device_put(sas_device);
1013 * _scsih_sas_device_add - insert sas_device to the list.
1014 * @ioc: per adapter object
1015 * @sas_device: the sas_device object
1016 * Context: This function will acquire ioc->sas_device_lock.
1018 * Adding new object to the ioc->sas_device_list.
1021 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1022 struct _sas_device *sas_device)
1024 unsigned long flags;
1027 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1028 __func__, sas_device->handle,
1029 (u64)sas_device->sas_address));
1031 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1034 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1035 sas_device_get(sas_device);
1036 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1037 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1039 if (ioc->hide_drives) {
1040 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1044 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1045 sas_device->sas_address_parent, sas_device->port)) {
1046 _scsih_sas_device_remove(ioc, sas_device);
1047 } else if (!sas_device->starget) {
1049 * When asyn scanning is enabled, its not possible to remove
1050 * devices while scanning is turned on due to an oops in
1051 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1053 if (!ioc->is_driver_loading) {
1054 mpt3sas_transport_port_remove(ioc,
1055 sas_device->sas_address,
1056 sas_device->sas_address_parent,
1058 _scsih_sas_device_remove(ioc, sas_device);
1061 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1065 * _scsih_sas_device_init_add - insert sas_device to the list.
1066 * @ioc: per adapter object
1067 * @sas_device: the sas_device object
1068 * Context: This function will acquire ioc->sas_device_lock.
1070 * Adding new object at driver load time to the ioc->sas_device_init_list.
1073 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1074 struct _sas_device *sas_device)
1076 unsigned long flags;
1079 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1080 __func__, sas_device->handle,
1081 (u64)sas_device->sas_address));
1083 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1086 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1087 sas_device_get(sas_device);
1088 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1089 _scsih_determine_boot_device(ioc, sas_device, 0);
1090 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1094 static struct _pcie_device *
1095 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1097 struct _pcie_device *pcie_device;
1099 assert_spin_locked(&ioc->pcie_device_lock);
1101 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1102 if (pcie_device->wwid == wwid)
1105 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1106 if (pcie_device->wwid == wwid)
1112 pcie_device_get(pcie_device);
1118 * mpt3sas_get_pdev_by_wwid - pcie device search
1119 * @ioc: per adapter object
1122 * Context: This function will acquire ioc->pcie_device_lock and will release
1123 * before returning the pcie_device object.
1125 * This searches for pcie_device based on wwid, then return pcie_device object.
1127 static struct _pcie_device *
1128 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1130 struct _pcie_device *pcie_device;
1131 unsigned long flags;
1133 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1134 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1135 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1141 static struct _pcie_device *
1142 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1145 struct _pcie_device *pcie_device;
1147 assert_spin_locked(&ioc->pcie_device_lock);
1149 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1150 if (pcie_device->id == id && pcie_device->channel == channel)
1153 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1154 if (pcie_device->id == id && pcie_device->channel == channel)
1160 pcie_device_get(pcie_device);
1164 static struct _pcie_device *
1165 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1167 struct _pcie_device *pcie_device;
1169 assert_spin_locked(&ioc->pcie_device_lock);
1171 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1172 if (pcie_device->handle == handle)
1175 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1176 if (pcie_device->handle == handle)
1182 pcie_device_get(pcie_device);
1188 * mpt3sas_get_pdev_by_handle - pcie device search
1189 * @ioc: per adapter object
1190 * @handle: Firmware device handle
1192 * Context: This function will acquire ioc->pcie_device_lock and will release
1193 * before returning the pcie_device object.
1195 * This searches for pcie_device based on handle, then return pcie_device
1198 struct _pcie_device *
1199 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1201 struct _pcie_device *pcie_device;
1202 unsigned long flags;
1204 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1205 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1206 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1212 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1213 * @ioc: per adapter object
1214 * Context: This function will acquire ioc->pcie_device_lock
1216 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1217 * which has reported maximum among all available NVMe drives.
1218 * Minimum max_shutdown_latency will be six seconds.
1221 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1223 struct _pcie_device *pcie_device;
1224 unsigned long flags;
1225 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1227 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1228 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1229 if (pcie_device->shutdown_latency) {
1230 if (shutdown_latency < pcie_device->shutdown_latency)
1232 pcie_device->shutdown_latency;
1235 ioc->max_shutdown_latency = shutdown_latency;
1236 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1240 * _scsih_pcie_device_remove - remove pcie_device from list.
1241 * @ioc: per adapter object
1242 * @pcie_device: the pcie_device object
1243 * Context: This function will acquire ioc->pcie_device_lock.
1245 * If pcie_device is on the list, remove it and decrement its reference count.
1248 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1249 struct _pcie_device *pcie_device)
1251 unsigned long flags;
1252 int was_on_pcie_device_list = 0;
1253 u8 update_latency = 0;
1257 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1258 pcie_device->handle, (u64)pcie_device->wwid);
1259 if (pcie_device->enclosure_handle != 0)
1260 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1261 (u64)pcie_device->enclosure_logical_id,
1263 if (pcie_device->connector_name[0] != '\0')
1264 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1265 pcie_device->enclosure_level,
1266 pcie_device->connector_name);
1268 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1269 if (!list_empty(&pcie_device->list)) {
1270 list_del_init(&pcie_device->list);
1271 was_on_pcie_device_list = 1;
1273 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1275 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1276 if (was_on_pcie_device_list) {
1277 kfree(pcie_device->serial_number);
1278 pcie_device_put(pcie_device);
1282 * This device's RTD3 Entry Latency matches IOC's
1283 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1284 * from the available drives as current drive is getting removed.
1287 _scsih_set_nvme_max_shutdown_latency(ioc);
1292 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1293 * @ioc: per adapter object
1294 * @handle: device handle
1297 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1299 struct _pcie_device *pcie_device;
1300 unsigned long flags;
1301 int was_on_pcie_device_list = 0;
1302 u8 update_latency = 0;
1304 if (ioc->shost_recovery)
1307 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1308 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1310 if (!list_empty(&pcie_device->list)) {
1311 list_del_init(&pcie_device->list);
1312 was_on_pcie_device_list = 1;
1313 pcie_device_put(pcie_device);
1315 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1318 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1319 if (was_on_pcie_device_list) {
1320 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1321 pcie_device_put(pcie_device);
1325 * This device's RTD3 Entry Latency matches IOC's
1326 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1327 * from the available drives as current drive is getting removed.
1330 _scsih_set_nvme_max_shutdown_latency(ioc);
1334 * _scsih_pcie_device_add - add pcie_device object
1335 * @ioc: per adapter object
1336 * @pcie_device: pcie_device object
1338 * This is added to the pcie_device_list link list.
1341 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1342 struct _pcie_device *pcie_device)
1344 unsigned long flags;
1347 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1349 pcie_device->handle, (u64)pcie_device->wwid));
1350 if (pcie_device->enclosure_handle != 0)
1352 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1354 (u64)pcie_device->enclosure_logical_id,
1355 pcie_device->slot));
1356 if (pcie_device->connector_name[0] != '\0')
1358 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1359 __func__, pcie_device->enclosure_level,
1360 pcie_device->connector_name));
1362 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1363 pcie_device_get(pcie_device);
1364 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1365 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1367 if (pcie_device->access_status ==
1368 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1369 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1372 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1373 _scsih_pcie_device_remove(ioc, pcie_device);
1374 } else if (!pcie_device->starget) {
1375 if (!ioc->is_driver_loading) {
1376 /*TODO-- Need to find out whether this condition will occur or not*/
1377 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1380 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1384 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1385 * @ioc: per adapter object
1386 * @pcie_device: the pcie_device object
1387 * Context: This function will acquire ioc->pcie_device_lock.
1389 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1392 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1393 struct _pcie_device *pcie_device)
1395 unsigned long flags;
1398 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1400 pcie_device->handle, (u64)pcie_device->wwid));
1401 if (pcie_device->enclosure_handle != 0)
1403 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1405 (u64)pcie_device->enclosure_logical_id,
1406 pcie_device->slot));
1407 if (pcie_device->connector_name[0] != '\0')
1409 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1410 __func__, pcie_device->enclosure_level,
1411 pcie_device->connector_name));
1413 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1414 pcie_device_get(pcie_device);
1415 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1416 if (pcie_device->access_status !=
1417 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1418 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1419 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1422 * _scsih_raid_device_find_by_id - raid device search
1423 * @ioc: per adapter object
1424 * @id: sas device target id
1425 * @channel: sas device channel
1426 * Context: Calling function should acquire ioc->raid_device_lock
1428 * This searches for raid_device based on target id, then return raid_device
1431 static struct _raid_device *
1432 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1434 struct _raid_device *raid_device, *r;
1437 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1438 if (raid_device->id == id && raid_device->channel == channel) {
1449 * mpt3sas_raid_device_find_by_handle - raid device search
1450 * @ioc: per adapter object
1451 * @handle: sas device handle (assigned by firmware)
1452 * Context: Calling function should acquire ioc->raid_device_lock
1454 * This searches for raid_device based on handle, then return raid_device
1457 struct _raid_device *
1458 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1460 struct _raid_device *raid_device, *r;
1463 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1464 if (raid_device->handle != handle)
1475 * _scsih_raid_device_find_by_wwid - raid device search
1476 * @ioc: per adapter object
1478 * Context: Calling function should acquire ioc->raid_device_lock
1480 * This searches for raid_device based on wwid, then return raid_device
1483 static struct _raid_device *
1484 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1486 struct _raid_device *raid_device, *r;
1489 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1490 if (raid_device->wwid != wwid)
1501 * _scsih_raid_device_add - add raid_device object
1502 * @ioc: per adapter object
1503 * @raid_device: raid_device object
1505 * This is added to the raid_device_list link list.
1508 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1509 struct _raid_device *raid_device)
1511 unsigned long flags;
1514 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1516 raid_device->handle, (u64)raid_device->wwid));
1518 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1519 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1520 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1524 * _scsih_raid_device_remove - delete raid_device object
1525 * @ioc: per adapter object
1526 * @raid_device: raid_device object
1530 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1531 struct _raid_device *raid_device)
1533 unsigned long flags;
1535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1536 list_del(&raid_device->list);
1538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1542 * mpt3sas_scsih_expander_find_by_handle - expander device search
1543 * @ioc: per adapter object
1544 * @handle: expander handle (assigned by firmware)
1545 * Context: Calling function should acquire ioc->sas_device_lock
1547 * This searches for expander device based on handle, then returns the
1551 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1553 struct _sas_node *sas_expander, *r;
1556 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1557 if (sas_expander->handle != handle)
1567 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1568 * @ioc: per adapter object
1569 * @handle: enclosure handle (assigned by firmware)
1570 * Context: Calling function should acquire ioc->sas_device_lock
1572 * This searches for enclosure device based on handle, then returns the
1575 static struct _enclosure_node *
1576 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1578 struct _enclosure_node *enclosure_dev, *r;
1581 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1582 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1591 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1592 * @ioc: per adapter object
1593 * @sas_address: sas address
1594 * @port: hba port entry
1595 * Context: Calling function should acquire ioc->sas_node_lock.
1597 * This searches for expander device based on sas_address & port number,
1598 * then returns the sas_node object.
1601 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1602 u64 sas_address, struct hba_port *port)
1604 struct _sas_node *sas_expander, *r = NULL;
1609 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1610 if (sas_expander->sas_address != sas_address)
1612 if (sas_expander->port != port)
1622 * _scsih_expander_node_add - insert expander device to the list.
1623 * @ioc: per adapter object
1624 * @sas_expander: the sas_device object
1625 * Context: This function will acquire ioc->sas_node_lock.
1627 * Adding new object to the ioc->sas_expander_list.
1630 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1631 struct _sas_node *sas_expander)
1633 unsigned long flags;
1635 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1636 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1637 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1641 * _scsih_is_end_device - determines if device is an end device
1642 * @device_info: bitfield providing information about the device.
1645 * Return: 1 if end device.
1648 _scsih_is_end_device(u32 device_info)
1650 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1651 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1652 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1660 * _scsih_is_nvme_pciescsi_device - determines if
1661 * device is an pcie nvme/scsi device
1662 * @device_info: bitfield providing information about the device.
1665 * Returns 1 if device is pcie device type nvme/scsi.
1668 _scsih_is_nvme_pciescsi_device(u32 device_info)
1670 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1671 == MPI26_PCIE_DEVINFO_NVME) ||
1672 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1673 == MPI26_PCIE_DEVINFO_SCSI))
1680 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1681 * @ioc: per adapter object
1684 * Context: This function will acquire ioc->scsi_lookup_lock.
1686 * This will search for a matching channel:id in the scsi_lookup array,
1687 * returning 1 if found.
1690 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1694 struct scsi_cmnd *scmd;
1697 smid <= ioc->shost->can_queue; smid++) {
1698 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1701 if (scmd->device->id == id &&
1702 scmd->device->channel == channel)
1709 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1710 * @ioc: per adapter object
1714 * Context: This function will acquire ioc->scsi_lookup_lock.
1716 * This will search for a matching channel:id:lun in the scsi_lookup array,
1717 * returning 1 if found.
1720 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1721 unsigned int lun, int channel)
1724 struct scsi_cmnd *scmd;
1726 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1728 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1731 if (scmd->device->id == id &&
1732 scmd->device->channel == channel &&
1733 scmd->device->lun == lun)
1740 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1741 * @ioc: per adapter object
1742 * @smid: system request message index
1744 * Return: the smid stored scmd pointer.
1745 * Then will dereference the stored scmd pointer.
1748 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1750 struct scsi_cmnd *scmd = NULL;
1751 struct scsiio_tracker *st;
1752 Mpi25SCSIIORequest_t *mpi_request;
1756 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1758 ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1760 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1763 * If SCSI IO request is outstanding at driver level then
1764 * DevHandle filed must be non-zero. If DevHandle is zero
1765 * then it means that this smid is free at driver level,
1768 if (!mpi_request->DevHandle)
1771 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1773 st = scsi_cmd_priv(scmd);
1774 if (st->cb_idx == 0xFF || st->smid == 0)
1782 * scsih_change_queue_depth - setting device queue depth
1783 * @sdev: scsi device struct
1784 * @qdepth: requested queue depth
1786 * Return: queue depth.
1789 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1791 struct Scsi_Host *shost = sdev->host;
1793 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1794 struct MPT3SAS_DEVICE *sas_device_priv_data;
1795 struct MPT3SAS_TARGET *sas_target_priv_data;
1796 struct _sas_device *sas_device;
1797 unsigned long flags;
1799 max_depth = shost->can_queue;
1802 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1805 if (ioc->enable_sdev_max_qd)
1808 sas_device_priv_data = sdev->hostdata;
1809 if (!sas_device_priv_data)
1811 sas_target_priv_data = sas_device_priv_data->sas_target;
1812 if (!sas_target_priv_data)
1814 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1817 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1818 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1820 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1821 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1823 sas_device_put(sas_device);
1825 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1829 if (!sdev->tagged_supported)
1831 if (qdepth > max_depth)
1833 scsi_change_queue_depth(sdev, qdepth);
1834 sdev_printk(KERN_INFO, sdev,
1835 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1836 sdev->queue_depth, sdev->tagged_supported,
1837 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1838 return sdev->queue_depth;
1842 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1843 * @sdev: scsi device struct
1844 * @qdepth: requested queue depth
1849 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1851 struct Scsi_Host *shost = sdev->host;
1852 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1854 if (ioc->enable_sdev_max_qd)
1855 qdepth = shost->can_queue;
1857 scsih_change_queue_depth(sdev, qdepth);
1861 * scsih_target_alloc - target add routine
1862 * @starget: scsi target struct
1864 * Return: 0 if ok. Any other return is assumed to be an error and
1865 * the device is ignored.
1868 scsih_target_alloc(struct scsi_target *starget)
1870 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1871 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1872 struct MPT3SAS_TARGET *sas_target_priv_data;
1873 struct _sas_device *sas_device;
1874 struct _raid_device *raid_device;
1875 struct _pcie_device *pcie_device;
1876 unsigned long flags;
1877 struct sas_rphy *rphy;
1879 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1881 if (!sas_target_priv_data)
1884 starget->hostdata = sas_target_priv_data;
1885 sas_target_priv_data->starget = starget;
1886 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1889 if (starget->channel == RAID_CHANNEL) {
1890 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1891 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1894 sas_target_priv_data->handle = raid_device->handle;
1895 sas_target_priv_data->sas_address = raid_device->wwid;
1896 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1897 if (ioc->is_warpdrive)
1898 sas_target_priv_data->raid_device = raid_device;
1899 raid_device->starget = starget;
1901 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1906 if (starget->channel == PCIE_CHANNEL) {
1907 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1908 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1911 sas_target_priv_data->handle = pcie_device->handle;
1912 sas_target_priv_data->sas_address = pcie_device->wwid;
1913 sas_target_priv_data->port = NULL;
1914 sas_target_priv_data->pcie_dev = pcie_device;
1915 pcie_device->starget = starget;
1916 pcie_device->id = starget->id;
1917 pcie_device->channel = starget->channel;
1918 sas_target_priv_data->flags |=
1919 MPT_TARGET_FLAGS_PCIE_DEVICE;
1920 if (pcie_device->fast_path)
1921 sas_target_priv_data->flags |=
1922 MPT_TARGET_FASTPATH_IO;
1924 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1928 /* sas/sata devices */
1929 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1930 rphy = dev_to_rphy(starget->dev.parent);
1931 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1934 sas_target_priv_data->handle = sas_device->handle;
1935 sas_target_priv_data->sas_address = sas_device->sas_address;
1936 sas_target_priv_data->port = sas_device->port;
1937 sas_target_priv_data->sas_dev = sas_device;
1938 sas_device->starget = starget;
1939 sas_device->id = starget->id;
1940 sas_device->channel = starget->channel;
1941 if (test_bit(sas_device->handle, ioc->pd_handles))
1942 sas_target_priv_data->flags |=
1943 MPT_TARGET_FLAGS_RAID_COMPONENT;
1944 if (sas_device->fast_path)
1945 sas_target_priv_data->flags |=
1946 MPT_TARGET_FASTPATH_IO;
1948 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1954 * scsih_target_destroy - target destroy routine
1955 * @starget: scsi target struct
1958 scsih_target_destroy(struct scsi_target *starget)
1960 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1961 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1962 struct MPT3SAS_TARGET *sas_target_priv_data;
1963 struct _sas_device *sas_device;
1964 struct _raid_device *raid_device;
1965 struct _pcie_device *pcie_device;
1966 unsigned long flags;
1968 sas_target_priv_data = starget->hostdata;
1969 if (!sas_target_priv_data)
1972 if (starget->channel == RAID_CHANNEL) {
1973 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1974 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1977 raid_device->starget = NULL;
1978 raid_device->sdev = NULL;
1980 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1984 if (starget->channel == PCIE_CHANNEL) {
1985 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1986 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1987 sas_target_priv_data);
1988 if (pcie_device && (pcie_device->starget == starget) &&
1989 (pcie_device->id == starget->id) &&
1990 (pcie_device->channel == starget->channel))
1991 pcie_device->starget = NULL;
1995 * Corresponding get() is in _scsih_target_alloc()
1997 sas_target_priv_data->pcie_dev = NULL;
1998 pcie_device_put(pcie_device);
1999 pcie_device_put(pcie_device);
2001 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2005 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2006 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2007 if (sas_device && (sas_device->starget == starget) &&
2008 (sas_device->id == starget->id) &&
2009 (sas_device->channel == starget->channel))
2010 sas_device->starget = NULL;
2014 * Corresponding get() is in _scsih_target_alloc()
2016 sas_target_priv_data->sas_dev = NULL;
2017 sas_device_put(sas_device);
2019 sas_device_put(sas_device);
2021 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2024 kfree(sas_target_priv_data);
2025 starget->hostdata = NULL;
2029 * scsih_slave_alloc - device add routine
2030 * @sdev: scsi device struct
2032 * Return: 0 if ok. Any other return is assumed to be an error and
2033 * the device is ignored.
2036 scsih_slave_alloc(struct scsi_device *sdev)
2038 struct Scsi_Host *shost;
2039 struct MPT3SAS_ADAPTER *ioc;
2040 struct MPT3SAS_TARGET *sas_target_priv_data;
2041 struct MPT3SAS_DEVICE *sas_device_priv_data;
2042 struct scsi_target *starget;
2043 struct _raid_device *raid_device;
2044 struct _sas_device *sas_device;
2045 struct _pcie_device *pcie_device;
2046 unsigned long flags;
2048 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2050 if (!sas_device_priv_data)
2053 sas_device_priv_data->lun = sdev->lun;
2054 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2056 starget = scsi_target(sdev);
2057 sas_target_priv_data = starget->hostdata;
2058 sas_target_priv_data->num_luns++;
2059 sas_device_priv_data->sas_target = sas_target_priv_data;
2060 sdev->hostdata = sas_device_priv_data;
2061 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2062 sdev->no_uld_attach = 1;
2064 shost = dev_to_shost(&starget->dev);
2065 ioc = shost_priv(shost);
2066 if (starget->channel == RAID_CHANNEL) {
2067 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2068 raid_device = _scsih_raid_device_find_by_id(ioc,
2069 starget->id, starget->channel);
2071 raid_device->sdev = sdev; /* raid is single lun */
2072 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2074 if (starget->channel == PCIE_CHANNEL) {
2075 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2076 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2077 sas_target_priv_data->sas_address);
2078 if (pcie_device && (pcie_device->starget == NULL)) {
2079 sdev_printk(KERN_INFO, sdev,
2080 "%s : pcie_device->starget set to starget @ %d\n",
2081 __func__, __LINE__);
2082 pcie_device->starget = starget;
2086 pcie_device_put(pcie_device);
2087 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2089 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2090 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2091 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2092 sas_target_priv_data->sas_address,
2093 sas_target_priv_data->port);
2094 if (sas_device && (sas_device->starget == NULL)) {
2095 sdev_printk(KERN_INFO, sdev,
2096 "%s : sas_device->starget set to starget @ %d\n",
2097 __func__, __LINE__);
2098 sas_device->starget = starget;
2102 sas_device_put(sas_device);
2104 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2111 * scsih_slave_destroy - device destroy routine
2112 * @sdev: scsi device struct
2115 scsih_slave_destroy(struct scsi_device *sdev)
2117 struct MPT3SAS_TARGET *sas_target_priv_data;
2118 struct scsi_target *starget;
2119 struct Scsi_Host *shost;
2120 struct MPT3SAS_ADAPTER *ioc;
2121 struct _sas_device *sas_device;
2122 struct _pcie_device *pcie_device;
2123 unsigned long flags;
2125 if (!sdev->hostdata)
2128 starget = scsi_target(sdev);
2129 sas_target_priv_data = starget->hostdata;
2130 sas_target_priv_data->num_luns--;
2132 shost = dev_to_shost(&starget->dev);
2133 ioc = shost_priv(shost);
2135 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2136 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2137 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2138 sas_target_priv_data);
2139 if (pcie_device && !sas_target_priv_data->num_luns)
2140 pcie_device->starget = NULL;
2143 pcie_device_put(pcie_device);
2145 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2147 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2148 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2149 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2150 sas_target_priv_data);
2151 if (sas_device && !sas_target_priv_data->num_luns)
2152 sas_device->starget = NULL;
2155 sas_device_put(sas_device);
2156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2159 kfree(sdev->hostdata);
2160 sdev->hostdata = NULL;
2164 * _scsih_display_sata_capabilities - sata capabilities
2165 * @ioc: per adapter object
2166 * @handle: device handle
2167 * @sdev: scsi device struct
2170 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2171 u16 handle, struct scsi_device *sdev)
2173 Mpi2ConfigReply_t mpi_reply;
2174 Mpi2SasDevicePage0_t sas_device_pg0;
2179 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2180 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2181 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2182 __FILE__, __LINE__, __func__);
2186 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2187 MPI2_IOCSTATUS_MASK;
2188 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2189 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2190 __FILE__, __LINE__, __func__);
2194 flags = le16_to_cpu(sas_device_pg0.Flags);
2195 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2197 sdev_printk(KERN_INFO, sdev,
2198 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2199 "sw_preserve(%s)\n",
2200 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2210 * raid transport support -
2211 * Enabled for SLES11 and newer, in older kernels the driver will panic when
2212 * unloading the driver followed by a load - I believe that the subroutine
2213 * raid_class_release() is not cleaning up properly.
2217 * scsih_is_raid - return boolean indicating device is raid volume
2218 * @dev: the device struct object
2221 scsih_is_raid(struct device *dev)
2223 struct scsi_device *sdev = to_scsi_device(dev);
2224 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2226 if (ioc->is_warpdrive)
2228 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2232 scsih_is_nvme(struct device *dev)
2234 struct scsi_device *sdev = to_scsi_device(dev);
2236 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2240 * scsih_get_resync - get raid volume resync percent complete
2241 * @dev: the device struct object
2244 scsih_get_resync(struct device *dev)
2246 struct scsi_device *sdev = to_scsi_device(dev);
2247 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2248 static struct _raid_device *raid_device;
2249 unsigned long flags;
2250 Mpi2RaidVolPage0_t vol_pg0;
2251 Mpi2ConfigReply_t mpi_reply;
2252 u32 volume_status_flags;
2253 u8 percent_complete;
2256 percent_complete = 0;
2258 if (ioc->is_warpdrive)
2261 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2262 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2265 handle = raid_device->handle;
2266 percent_complete = raid_device->percent_complete;
2268 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2273 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2274 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2275 sizeof(Mpi2RaidVolPage0_t))) {
2276 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2277 __FILE__, __LINE__, __func__);
2278 percent_complete = 0;
2282 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2283 if (!(volume_status_flags &
2284 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2285 percent_complete = 0;
2289 switch (ioc->hba_mpi_version_belonged) {
2291 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2295 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2301 * scsih_get_state - get raid volume level
2302 * @dev: the device struct object
2305 scsih_get_state(struct device *dev)
2307 struct scsi_device *sdev = to_scsi_device(dev);
2308 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2309 static struct _raid_device *raid_device;
2310 unsigned long flags;
2311 Mpi2RaidVolPage0_t vol_pg0;
2312 Mpi2ConfigReply_t mpi_reply;
2314 enum raid_state state = RAID_STATE_UNKNOWN;
2317 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2318 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2321 handle = raid_device->handle;
2322 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2327 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2328 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2329 sizeof(Mpi2RaidVolPage0_t))) {
2330 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2331 __FILE__, __LINE__, __func__);
2335 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2336 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2337 state = RAID_STATE_RESYNCING;
2341 switch (vol_pg0.VolumeState) {
2342 case MPI2_RAID_VOL_STATE_OPTIMAL:
2343 case MPI2_RAID_VOL_STATE_ONLINE:
2344 state = RAID_STATE_ACTIVE;
2346 case MPI2_RAID_VOL_STATE_DEGRADED:
2347 state = RAID_STATE_DEGRADED;
2349 case MPI2_RAID_VOL_STATE_FAILED:
2350 case MPI2_RAID_VOL_STATE_MISSING:
2351 state = RAID_STATE_OFFLINE;
2355 switch (ioc->hba_mpi_version_belonged) {
2357 raid_set_state(mpt2sas_raid_template, dev, state);
2361 raid_set_state(mpt3sas_raid_template, dev, state);
2367 * _scsih_set_level - set raid level
2369 * @sdev: scsi device struct
2370 * @volume_type: volume type
2373 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2374 struct scsi_device *sdev, u8 volume_type)
2376 enum raid_level level = RAID_LEVEL_UNKNOWN;
2378 switch (volume_type) {
2379 case MPI2_RAID_VOL_TYPE_RAID0:
2380 level = RAID_LEVEL_0;
2382 case MPI2_RAID_VOL_TYPE_RAID10:
2383 level = RAID_LEVEL_10;
2385 case MPI2_RAID_VOL_TYPE_RAID1E:
2386 level = RAID_LEVEL_1E;
2388 case MPI2_RAID_VOL_TYPE_RAID1:
2389 level = RAID_LEVEL_1;
2393 switch (ioc->hba_mpi_version_belonged) {
2395 raid_set_level(mpt2sas_raid_template,
2396 &sdev->sdev_gendev, level);
2400 raid_set_level(mpt3sas_raid_template,
2401 &sdev->sdev_gendev, level);
2408 * _scsih_get_volume_capabilities - volume capabilities
2409 * @ioc: per adapter object
2410 * @raid_device: the raid_device object
2412 * Return: 0 for success, else 1
2415 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2416 struct _raid_device *raid_device)
2418 Mpi2RaidVolPage0_t *vol_pg0;
2419 Mpi2RaidPhysDiskPage0_t pd_pg0;
2420 Mpi2SasDevicePage0_t sas_device_pg0;
2421 Mpi2ConfigReply_t mpi_reply;
2425 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2426 &num_pds)) || !num_pds) {
2428 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2429 __FILE__, __LINE__, __func__));
2433 raid_device->num_pds = num_pds;
2434 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2435 sizeof(Mpi2RaidVol0PhysDisk_t));
2436 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2439 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2440 __FILE__, __LINE__, __func__));
2444 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2445 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2447 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2448 __FILE__, __LINE__, __func__));
2453 raid_device->volume_type = vol_pg0->VolumeType;
2455 /* figure out what the underlying devices are by
2456 * obtaining the device_info bits for the 1st device
2458 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2459 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2460 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2461 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2462 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2463 le16_to_cpu(pd_pg0.DevHandle)))) {
2464 raid_device->device_info =
2465 le32_to_cpu(sas_device_pg0.DeviceInfo);
2474 * _scsih_enable_tlr - setting TLR flags
2475 * @ioc: per adapter object
2476 * @sdev: scsi device struct
2478 * Enabling Transaction Layer Retries for tape devices when
2479 * vpd page 0x90 is present
2483 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2487 if (sdev->type != TYPE_TAPE)
2490 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2493 sas_enable_tlr(sdev);
2494 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2495 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2501 * scsih_slave_configure - device configure routine.
2502 * @sdev: scsi device struct
2504 * Return: 0 if ok. Any other return is assumed to be an error and
2505 * the device is ignored.
2508 scsih_slave_configure(struct scsi_device *sdev)
2510 struct Scsi_Host *shost = sdev->host;
2511 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2512 struct MPT3SAS_DEVICE *sas_device_priv_data;
2513 struct MPT3SAS_TARGET *sas_target_priv_data;
2514 struct _sas_device *sas_device;
2515 struct _pcie_device *pcie_device;
2516 struct _raid_device *raid_device;
2517 unsigned long flags;
2522 u16 handle, volume_handle = 0;
2523 u64 volume_wwid = 0;
2526 sas_device_priv_data = sdev->hostdata;
2527 sas_device_priv_data->configured_lun = 1;
2528 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2529 sas_target_priv_data = sas_device_priv_data->sas_target;
2530 handle = sas_target_priv_data->handle;
2532 /* raid volume handling */
2533 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2536 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2537 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2540 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2541 __FILE__, __LINE__, __func__));
2545 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2547 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2548 __FILE__, __LINE__, __func__));
2553 * WARPDRIVE: Initialize the required data for Direct IO
2555 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2557 /* RAID Queue Depth Support
2558 * IS volume = underlying qdepth of drive type, either
2559 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2560 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2562 if (raid_device->device_info &
2563 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2564 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2567 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2568 if (raid_device->device_info &
2569 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2575 switch (raid_device->volume_type) {
2576 case MPI2_RAID_VOL_TYPE_RAID0:
2579 case MPI2_RAID_VOL_TYPE_RAID1E:
2580 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2581 if (ioc->manu_pg10.OEMIdentifier &&
2582 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2583 MFG10_GF0_R10_DISPLAY) &&
2584 !(raid_device->num_pds % 2))
2589 case MPI2_RAID_VOL_TYPE_RAID1:
2590 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2593 case MPI2_RAID_VOL_TYPE_RAID10:
2594 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2597 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2599 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2604 if (!ioc->hide_ir_msg)
2605 sdev_printk(KERN_INFO, sdev,
2606 "%s: handle(0x%04x), wwid(0x%016llx),"
2607 " pd_count(%d), type(%s)\n",
2608 r_level, raid_device->handle,
2609 (unsigned long long)raid_device->wwid,
2610 raid_device->num_pds, ds);
2612 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2613 blk_queue_max_hw_sectors(sdev->request_queue,
2614 MPT3SAS_RAID_MAX_SECTORS);
2615 sdev_printk(KERN_INFO, sdev,
2616 "Set queue's max_sector to: %u\n",
2617 MPT3SAS_RAID_MAX_SECTORS);
2620 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2622 /* raid transport support */
2623 if (!ioc->is_warpdrive)
2624 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2628 /* non-raid handling */
2629 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2630 if (mpt3sas_config_get_volume_handle(ioc, handle,
2633 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2634 __FILE__, __LINE__, __func__));
2637 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2638 volume_handle, &volume_wwid)) {
2640 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2641 __FILE__, __LINE__, __func__));
2647 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2648 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2649 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2650 sas_device_priv_data->sas_target->sas_address);
2652 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2654 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2655 __FILE__, __LINE__, __func__));
2659 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2661 sdev_printk(KERN_INFO, sdev,
2662 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2663 ds, handle, (unsigned long long)pcie_device->wwid,
2664 pcie_device->port_num);
2665 if (pcie_device->enclosure_handle != 0)
2666 sdev_printk(KERN_INFO, sdev,
2667 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2669 (unsigned long long)pcie_device->enclosure_logical_id,
2671 if (pcie_device->connector_name[0] != '\0')
2672 sdev_printk(KERN_INFO, sdev,
2673 "%s: enclosure level(0x%04x),"
2674 "connector name( %s)\n", ds,
2675 pcie_device->enclosure_level,
2676 pcie_device->connector_name);
2678 if (pcie_device->nvme_mdts)
2679 blk_queue_max_hw_sectors(sdev->request_queue,
2680 pcie_device->nvme_mdts/512);
2682 pcie_device_put(pcie_device);
2683 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2684 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2685 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2686 ** merged and can eliminate holes created during merging
2689 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2690 sdev->request_queue);
2691 blk_queue_virt_boundary(sdev->request_queue,
2692 ioc->page_size - 1);
2696 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2697 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2698 sas_device_priv_data->sas_target->sas_address,
2699 sas_device_priv_data->sas_target->port);
2701 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2703 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2704 __FILE__, __LINE__, __func__));
2708 sas_device->volume_handle = volume_handle;
2709 sas_device->volume_wwid = volume_wwid;
2710 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2711 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2713 if (sas_device->device_info &
2714 MPI2_SAS_DEVICE_INFO_SEP) {
2715 sdev_printk(KERN_WARNING, sdev,
2716 "set ignore_delay_remove for handle(0x%04x)\n",
2717 sas_device_priv_data->sas_target->handle);
2718 sas_device_priv_data->ignore_delay_remove = 1;
2723 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2724 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2726 else if (sas_device->device_info &
2727 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2731 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2732 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2733 ds, handle, (unsigned long long)sas_device->sas_address,
2734 sas_device->phy, (unsigned long long)sas_device->device_name);
2736 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2738 sas_device_put(sas_device);
2739 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2742 _scsih_display_sata_capabilities(ioc, handle, sdev);
2745 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2748 sas_read_port_mode_page(sdev);
2749 _scsih_enable_tlr(ioc, sdev);
2756 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2757 * @sdev: scsi device struct
2758 * @bdev: pointer to block device context
2759 * @capacity: device size (in 512 byte sectors)
2760 * @params: three element array to place output:
2761 * params[0] number of heads (max 255)
2762 * params[1] number of sectors (max 63)
2763 * params[2] number of cylinders
2766 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2767 sector_t capacity, int params[])
2777 dummy = heads * sectors;
2778 cylinders = capacity;
2779 sector_div(cylinders, dummy);
2782 * Handle extended translation size for logical drives
2785 if ((ulong)capacity >= 0x200000) {
2788 dummy = heads * sectors;
2789 cylinders = capacity;
2790 sector_div(cylinders, dummy);
2795 params[1] = sectors;
2796 params[2] = cylinders;
2802 * _scsih_response_code - translation of device response code
2803 * @ioc: per adapter object
2804 * @response_code: response code returned by the device
2807 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2811 switch (response_code) {
2812 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2813 desc = "task management request completed";
2815 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2816 desc = "invalid frame";
2818 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2819 desc = "task management request not supported";
2821 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2822 desc = "task management request failed";
2824 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2825 desc = "task management request succeeded";
2827 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2828 desc = "invalid lun";
2831 desc = "overlapped tag attempted";
2833 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2834 desc = "task queued, however not sent to target";
2840 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2844 * _scsih_tm_done - tm completion routine
2845 * @ioc: per adapter object
2846 * @smid: system request message index
2847 * @msix_index: MSIX table index supplied by the OS
2848 * @reply: reply message frame(lower 32bit addr)
2851 * The callback handler when using scsih_issue_tm.
2853 * Return: 1 meaning mf should be freed from _base_interrupt
2854 * 0 means the mf is freed from this function.
2857 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2859 MPI2DefaultReply_t *mpi_reply;
2861 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2863 if (ioc->tm_cmds.smid != smid)
2865 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2866 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2868 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2869 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2871 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2872 complete(&ioc->tm_cmds.done);
2877 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2878 * @ioc: per adapter object
2879 * @handle: device handle
2881 * During taskmangement request, we need to freeze the device queue.
2884 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2886 struct MPT3SAS_DEVICE *sas_device_priv_data;
2887 struct scsi_device *sdev;
2890 shost_for_each_device(sdev, ioc->shost) {
2893 sas_device_priv_data = sdev->hostdata;
2894 if (!sas_device_priv_data)
2896 if (sas_device_priv_data->sas_target->handle == handle) {
2897 sas_device_priv_data->sas_target->tm_busy = 1;
2899 ioc->ignore_loginfos = 1;
2905 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2906 * @ioc: per adapter object
2907 * @handle: device handle
2909 * During taskmangement request, we need to freeze the device queue.
2912 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2914 struct MPT3SAS_DEVICE *sas_device_priv_data;
2915 struct scsi_device *sdev;
2918 shost_for_each_device(sdev, ioc->shost) {
2921 sas_device_priv_data = sdev->hostdata;
2922 if (!sas_device_priv_data)
2924 if (sas_device_priv_data->sas_target->handle == handle) {
2925 sas_device_priv_data->sas_target->tm_busy = 0;
2927 ioc->ignore_loginfos = 0;
2933 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2934 * @ioc: per adapter object
2935 * @channel: the channel assigned by the OS
2936 * @id: the id assigned by the OS
2938 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2939 * @smid_task: smid assigned to the task
2941 * Look whether TM has aborted the timed out SCSI command, if
2942 * TM has aborted the IO then return SUCCESS else return FAILED.
2945 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2946 uint id, uint lun, u8 type, u16 smid_task)
2949 if (smid_task <= ioc->shost->can_queue) {
2951 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2952 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2956 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2957 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2958 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2965 } else if (smid_task == ioc->scsih_cmds.smid) {
2966 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2967 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2969 } else if (smid_task == ioc->ctl_cmds.smid) {
2970 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2971 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2979 * scsih_tm_post_processing - post processing of target & LUN reset
2980 * @ioc: per adapter object
2981 * @handle: device handle
2982 * @channel: the channel assigned by the OS
2983 * @id: the id assigned by the OS
2985 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2986 * @smid_task: smid assigned to the task
2988 * Post processing of target & LUN reset. Due to interrupt latency
2989 * issue it possible that interrupt for aborted IO might not be
2990 * received yet. So before returning failure status, poll the
2991 * reply descriptor pools for the reply of timed out SCSI command.
2992 * Return FAILED status if reply for timed out is not received
2993 * otherwise return SUCCESS.
2996 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2997 uint channel, uint id, uint lun, u8 type, u16 smid_task)
3001 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3006 "Poll ReplyDescriptor queues for completion of"
3007 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3008 smid_task, type, handle);
3011 * Due to interrupt latency issues, driver may receive interrupt for
3012 * TM first and then for aborted SCSI IO command. So, poll all the
3013 * ReplyDescriptor pools before returning the FAILED status to SML.
3015 mpt3sas_base_mask_interrupts(ioc);
3016 mpt3sas_base_sync_reply_irqs(ioc, 1);
3017 mpt3sas_base_unmask_interrupts(ioc);
3019 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3023 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3024 * @ioc: per adapter struct
3025 * @handle: device handle
3026 * @channel: the channel assigned by the OS
3027 * @id: the id assigned by the OS
3029 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3030 * @smid_task: smid assigned to the task
3031 * @msix_task: MSIX table index supplied by the OS
3032 * @timeout: timeout in seconds
3033 * @tr_method: Target Reset Method
3036 * A generic API for sending task management requests to firmware.
3038 * The callback index is set inside `ioc->tm_cb_idx`.
3039 * The caller is responsible to check for outstanding commands.
3041 * Return: SUCCESS or FAILED.
3044 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3045 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3046 u8 timeout, u8 tr_method)
3048 Mpi2SCSITaskManagementRequest_t *mpi_request;
3049 Mpi2SCSITaskManagementReply_t *mpi_reply;
3050 Mpi25SCSIIORequest_t *request;
3056 lockdep_assert_held(&ioc->tm_cmds.mutex);
3058 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3059 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3063 if (ioc->shost_recovery || ioc->remove_host ||
3064 ioc->pci_error_recovery) {
3065 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
3069 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3070 if (ioc_state & MPI2_DOORBELL_USED) {
3071 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3072 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3073 return (!rc) ? SUCCESS : FAILED;
3076 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3077 mpt3sas_print_fault_code(ioc, ioc_state &
3078 MPI2_DOORBELL_DATA_MASK);
3079 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3080 return (!rc) ? SUCCESS : FAILED;
3081 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3082 MPI2_IOC_STATE_COREDUMP) {
3083 mpt3sas_print_coredump_info(ioc, ioc_state &
3084 MPI2_DOORBELL_DATA_MASK);
3085 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3086 return (!rc) ? SUCCESS : FAILED;
3089 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3091 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3096 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3097 handle, type, smid_task, timeout, tr_method));
3098 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3099 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3100 ioc->tm_cmds.smid = smid;
3101 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3102 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3103 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3104 mpi_request->DevHandle = cpu_to_le16(handle);
3105 mpi_request->TaskType = type;
3106 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3107 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3108 mpi_request->MsgFlags = tr_method;
3109 mpi_request->TaskMID = cpu_to_le16(smid_task);
3110 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
3111 mpt3sas_scsih_set_tm_flag(ioc, handle);
3112 init_completion(&ioc->tm_cmds.done);
3113 ioc->put_smid_hi_priority(ioc, smid, msix_task);
3114 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
3115 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3116 mpt3sas_check_cmd_timeout(ioc,
3117 ioc->tm_cmds.status, mpi_request,
3118 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3120 rc = mpt3sas_base_hard_reset_handler(ioc,
3122 rc = (!rc) ? SUCCESS : FAILED;
3127 /* sync IRQs in case those were busy during flush. */
3128 mpt3sas_base_sync_reply_irqs(ioc, 0);
3130 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3131 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3132 mpi_reply = ioc->tm_cmds.reply;
3134 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3135 le16_to_cpu(mpi_reply->IOCStatus),
3136 le32_to_cpu(mpi_reply->IOCLogInfo),
3137 le32_to_cpu(mpi_reply->TerminationCount)));
3138 if (ioc->logging_level & MPT_DEBUG_TM) {
3139 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3140 if (mpi_reply->IOCStatus)
3141 _debug_dump_mf(mpi_request,
3142 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3147 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3150 * If DevHandle filed in smid_task's entry of request pool
3151 * doesn't match with device handle on which this task abort
3152 * TM is received then it means that TM has successfully
3153 * aborted the timed out command. Since smid_task's entry in
3154 * request pool will be memset to zero once the timed out
3155 * command is returned to the SML. If the command is not
3156 * aborted then smid_task’s entry won’t be cleared and it
3157 * will have same DevHandle value on which this task abort TM
3158 * is received and driver will return the TM status as FAILED.
3160 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3161 if (le16_to_cpu(request->DevHandle) != handle)
3164 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3165 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3166 handle, timeout, tr_method, smid_task, msix_task);
3170 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3171 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3172 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3173 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3176 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3185 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3186 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3190 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3191 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3192 u16 msix_task, u8 timeout, u8 tr_method)
3196 mutex_lock(&ioc->tm_cmds.mutex);
3197 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3198 smid_task, msix_task, timeout, tr_method);
3199 mutex_unlock(&ioc->tm_cmds.mutex);
3205 * _scsih_tm_display_info - displays info about the device
3206 * @ioc: per adapter struct
3207 * @scmd: pointer to scsi command object
3209 * Called by task management callback handlers.
3212 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3214 struct scsi_target *starget = scmd->device->sdev_target;
3215 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3216 struct _sas_device *sas_device = NULL;
3217 struct _pcie_device *pcie_device = NULL;
3218 unsigned long flags;
3219 char *device_str = NULL;
3223 if (ioc->hide_ir_msg)
3224 device_str = "WarpDrive";
3226 device_str = "volume";
3228 scsi_print_command(scmd);
3229 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3230 starget_printk(KERN_INFO, starget,
3231 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3232 device_str, priv_target->handle,
3233 device_str, (unsigned long long)priv_target->sas_address);
3235 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3236 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3237 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3239 starget_printk(KERN_INFO, starget,
3240 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3241 pcie_device->handle,
3242 (unsigned long long)pcie_device->wwid,
3243 pcie_device->port_num);
3244 if (pcie_device->enclosure_handle != 0)
3245 starget_printk(KERN_INFO, starget,
3246 "enclosure logical id(0x%016llx), slot(%d)\n",
3247 (unsigned long long)
3248 pcie_device->enclosure_logical_id,
3250 if (pcie_device->connector_name[0] != '\0')
3251 starget_printk(KERN_INFO, starget,
3252 "enclosure level(0x%04x), connector name( %s)\n",
3253 pcie_device->enclosure_level,
3254 pcie_device->connector_name);
3255 pcie_device_put(pcie_device);
3257 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3260 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3261 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3263 if (priv_target->flags &
3264 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3265 starget_printk(KERN_INFO, starget,
3266 "volume handle(0x%04x), "
3267 "volume wwid(0x%016llx)\n",
3268 sas_device->volume_handle,
3269 (unsigned long long)sas_device->volume_wwid);
3271 starget_printk(KERN_INFO, starget,
3272 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3274 (unsigned long long)sas_device->sas_address,
3277 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3280 sas_device_put(sas_device);
3282 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3287 * scsih_abort - eh threads main abort routine
3288 * @scmd: pointer to scsi command object
3290 * Return: SUCCESS if command aborted else FAILED
3293 scsih_abort(struct scsi_cmnd *scmd)
3295 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3296 struct MPT3SAS_DEVICE *sas_device_priv_data;
3297 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3302 struct _pcie_device *pcie_device = NULL;
3303 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3304 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3305 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3306 (scmd->request->timeout / HZ) * 1000);
3307 _scsih_tm_display_info(ioc, scmd);
3309 sas_device_priv_data = scmd->device->hostdata;
3310 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3312 sdev_printk(KERN_INFO, scmd->device,
3313 "device been deleted! scmd(0x%p)\n", scmd);
3314 scmd->result = DID_NO_CONNECT << 16;
3315 scmd->scsi_done(scmd);
3320 /* check for completed command */
3321 if (st == NULL || st->cb_idx == 0xFF) {
3322 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3323 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3324 scmd->result = DID_RESET << 16;
3329 /* for hidden raid components and volumes this is not supported */
3330 if (sas_device_priv_data->sas_target->flags &
3331 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3332 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3333 scmd->result = DID_RESET << 16;
3338 mpt3sas_halt_firmware(ioc);
3340 handle = sas_device_priv_data->sas_target->handle;
3341 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3342 if (pcie_device && (!ioc->tm_custom_handling) &&
3343 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3344 timeout = ioc->nvme_abort_timeout;
3345 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3346 scmd->device->id, scmd->device->lun,
3347 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3348 st->smid, st->msix_io, timeout, 0);
3349 /* Command must be cleared after abort */
3350 if (r == SUCCESS && st->cb_idx != 0xFF)
3353 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3354 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3356 pcie_device_put(pcie_device);
3361 * scsih_dev_reset - eh threads main device reset routine
3362 * @scmd: pointer to scsi command object
3364 * Return: SUCCESS if command aborted else FAILED
3367 scsih_dev_reset(struct scsi_cmnd *scmd)
3369 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3370 struct MPT3SAS_DEVICE *sas_device_priv_data;
3371 struct _sas_device *sas_device = NULL;
3372 struct _pcie_device *pcie_device = NULL;
3378 struct scsi_target *starget = scmd->device->sdev_target;
3379 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3381 sdev_printk(KERN_INFO, scmd->device,
3382 "attempting device reset! scmd(0x%p)\n", scmd);
3383 _scsih_tm_display_info(ioc, scmd);
3385 sas_device_priv_data = scmd->device->hostdata;
3386 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3388 sdev_printk(KERN_INFO, scmd->device,
3389 "device been deleted! scmd(0x%p)\n", scmd);
3390 scmd->result = DID_NO_CONNECT << 16;
3391 scmd->scsi_done(scmd);
3396 /* for hidden raid components obtain the volume_handle */
3398 if (sas_device_priv_data->sas_target->flags &
3399 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3400 sas_device = mpt3sas_get_sdev_from_target(ioc,
3403 handle = sas_device->volume_handle;
3405 handle = sas_device_priv_data->sas_target->handle;
3408 scmd->result = DID_RESET << 16;
3413 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3415 if (pcie_device && (!ioc->tm_custom_handling) &&
3416 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3417 tr_timeout = pcie_device->reset_timeout;
3418 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3420 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3422 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3423 scmd->device->id, scmd->device->lun,
3424 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3425 tr_timeout, tr_method);
3426 /* Check for busy commands after reset */
3427 if (r == SUCCESS && scsi_device_busy(scmd->device))
3430 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3431 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3434 sas_device_put(sas_device);
3436 pcie_device_put(pcie_device);
3442 * scsih_target_reset - eh threads main target reset routine
3443 * @scmd: pointer to scsi command object
3445 * Return: SUCCESS if command aborted else FAILED
3448 scsih_target_reset(struct scsi_cmnd *scmd)
3450 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3451 struct MPT3SAS_DEVICE *sas_device_priv_data;
3452 struct _sas_device *sas_device = NULL;
3453 struct _pcie_device *pcie_device = NULL;
3458 struct scsi_target *starget = scmd->device->sdev_target;
3459 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3461 starget_printk(KERN_INFO, starget,
3462 "attempting target reset! scmd(0x%p)\n", scmd);
3463 _scsih_tm_display_info(ioc, scmd);
3465 sas_device_priv_data = scmd->device->hostdata;
3466 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3468 starget_printk(KERN_INFO, starget,
3469 "target been deleted! scmd(0x%p)\n", scmd);
3470 scmd->result = DID_NO_CONNECT << 16;
3471 scmd->scsi_done(scmd);
3476 /* for hidden raid components obtain the volume_handle */
3478 if (sas_device_priv_data->sas_target->flags &
3479 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3480 sas_device = mpt3sas_get_sdev_from_target(ioc,
3483 handle = sas_device->volume_handle;
3485 handle = sas_device_priv_data->sas_target->handle;
3488 scmd->result = DID_RESET << 16;
3493 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3495 if (pcie_device && (!ioc->tm_custom_handling) &&
3496 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3497 tr_timeout = pcie_device->reset_timeout;
3498 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3500 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3501 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3502 scmd->device->id, 0,
3503 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3504 tr_timeout, tr_method);
3505 /* Check for busy commands after reset */
3506 if (r == SUCCESS && atomic_read(&starget->target_busy))
3509 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3510 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3513 sas_device_put(sas_device);
3515 pcie_device_put(pcie_device);
3521 * scsih_host_reset - eh threads main host reset routine
3522 * @scmd: pointer to scsi command object
3524 * Return: SUCCESS if command aborted else FAILED
3527 scsih_host_reset(struct scsi_cmnd *scmd)
3529 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3532 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3533 scsi_print_command(scmd);
3535 if (ioc->is_driver_loading || ioc->remove_host) {
3536 ioc_info(ioc, "Blocking the host reset\n");
3541 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3542 r = (retval < 0) ? FAILED : SUCCESS;
3544 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3545 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3551 * _scsih_fw_event_add - insert and queue up fw_event
3552 * @ioc: per adapter object
3553 * @fw_event: object describing the event
3554 * Context: This function will acquire ioc->fw_event_lock.
3556 * This adds the firmware event object into link list, then queues it up to
3557 * be processed from user context.
3560 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3562 unsigned long flags;
3564 if (ioc->firmware_event_thread == NULL)
3567 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3568 fw_event_work_get(fw_event);
3569 INIT_LIST_HEAD(&fw_event->list);
3570 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3571 INIT_WORK(&fw_event->work, _firmware_event_work);
3572 fw_event_work_get(fw_event);
3573 queue_work(ioc->firmware_event_thread, &fw_event->work);
3574 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3578 * _scsih_fw_event_del_from_list - delete fw_event from the list
3579 * @ioc: per adapter object
3580 * @fw_event: object describing the event
3581 * Context: This function will acquire ioc->fw_event_lock.
3583 * If the fw_event is on the fw_event_list, remove it and do a put.
3586 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3589 unsigned long flags;
3591 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3592 if (!list_empty(&fw_event->list)) {
3593 list_del_init(&fw_event->list);
3594 fw_event_work_put(fw_event);
3596 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3601 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3602 * @ioc: per adapter object
3603 * @event_data: trigger event data
3606 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3607 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3609 struct fw_event_work *fw_event;
3612 if (ioc->is_driver_loading)
3614 sz = sizeof(*event_data);
3615 fw_event = alloc_fw_event_work(sz);
3618 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3619 fw_event->ioc = ioc;
3620 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3621 _scsih_fw_event_add(ioc, fw_event);
3622 fw_event_work_put(fw_event);
3626 * _scsih_error_recovery_delete_devices - remove devices not responding
3627 * @ioc: per adapter object
3630 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3632 struct fw_event_work *fw_event;
3634 if (ioc->is_driver_loading)
3636 fw_event = alloc_fw_event_work(0);
3639 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3640 fw_event->ioc = ioc;
3641 _scsih_fw_event_add(ioc, fw_event);
3642 fw_event_work_put(fw_event);
3646 * mpt3sas_port_enable_complete - port enable completed (fake event)
3647 * @ioc: per adapter object
3650 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3652 struct fw_event_work *fw_event;
3654 fw_event = alloc_fw_event_work(0);
3657 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3658 fw_event->ioc = ioc;
3659 _scsih_fw_event_add(ioc, fw_event);
3660 fw_event_work_put(fw_event);
3663 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3665 unsigned long flags;
3666 struct fw_event_work *fw_event = NULL;
3668 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3669 if (!list_empty(&ioc->fw_event_list)) {
3670 fw_event = list_first_entry(&ioc->fw_event_list,
3671 struct fw_event_work, list);
3672 list_del_init(&fw_event->list);
3674 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3680 * _scsih_fw_event_cleanup_queue - cleanup event queue
3681 * @ioc: per adapter object
3683 * Walk the firmware event queue, either killing timers, or waiting
3684 * for outstanding events to complete
3686 * Context: task, can sleep
3689 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3691 struct fw_event_work *fw_event;
3693 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3694 !ioc->firmware_event_thread)
3697 ioc->fw_events_cleanup = 1;
3698 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3699 (fw_event = ioc->current_event)) {
3701 * Wait on the fw_event to complete. If this returns 1, then
3702 * the event was never executed, and we need a put for the
3703 * reference the work had on the fw_event.
3705 * If it did execute, we wait for it to finish, and the put will
3706 * happen from _firmware_event_work()
3708 if (cancel_work_sync(&fw_event->work))
3709 fw_event_work_put(fw_event);
3711 fw_event_work_put(fw_event);
3713 ioc->fw_events_cleanup = 0;
3717 * _scsih_internal_device_block - block the sdev device
3718 * @sdev: per device object
3719 * @sas_device_priv_data : per device driver private data
3721 * make sure device is blocked without error, if not
3725 _scsih_internal_device_block(struct scsi_device *sdev,
3726 struct MPT3SAS_DEVICE *sas_device_priv_data)
3730 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3731 sas_device_priv_data->sas_target->handle);
3732 sas_device_priv_data->block = 1;
3734 r = scsi_internal_device_block_nowait(sdev);
3736 sdev_printk(KERN_WARNING, sdev,
3737 "device_block failed with return(%d) for handle(0x%04x)\n",
3738 r, sas_device_priv_data->sas_target->handle);
3742 * _scsih_internal_device_unblock - unblock the sdev device
3743 * @sdev: per device object
3744 * @sas_device_priv_data : per device driver private data
3745 * make sure device is unblocked without error, if not retry
3746 * by blocking and then unblocking
3750 _scsih_internal_device_unblock(struct scsi_device *sdev,
3751 struct MPT3SAS_DEVICE *sas_device_priv_data)
3755 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3756 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3757 sas_device_priv_data->block = 0;
3758 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3760 /* The device has been set to SDEV_RUNNING by SD layer during
3761 * device addition but the request queue is still stopped by
3762 * our earlier block call. We need to perform a block again
3763 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3765 sdev_printk(KERN_WARNING, sdev,
3766 "device_unblock failed with return(%d) for handle(0x%04x) "
3767 "performing a block followed by an unblock\n",
3768 r, sas_device_priv_data->sas_target->handle);
3769 sas_device_priv_data->block = 1;
3770 r = scsi_internal_device_block_nowait(sdev);
3772 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3773 "failed with return(%d) for handle(0x%04x)\n",
3774 r, sas_device_priv_data->sas_target->handle);
3776 sas_device_priv_data->block = 0;
3777 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3779 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3780 " failed with return(%d) for handle(0x%04x)\n",
3781 r, sas_device_priv_data->sas_target->handle);
3786 * _scsih_ublock_io_all_device - unblock every device
3787 * @ioc: per adapter object
3789 * change the device state from block to running
3792 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3794 struct MPT3SAS_DEVICE *sas_device_priv_data;
3795 struct scsi_device *sdev;
3797 shost_for_each_device(sdev, ioc->shost) {
3798 sas_device_priv_data = sdev->hostdata;
3799 if (!sas_device_priv_data)
3801 if (!sas_device_priv_data->block)
3804 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3805 "device_running, handle(0x%04x)\n",
3806 sas_device_priv_data->sas_target->handle));
3807 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3813 * _scsih_ublock_io_device - prepare device to be deleted
3814 * @ioc: per adapter object
3815 * @sas_address: sas address
3816 * @port: hba port entry
3818 * unblock then put device in offline state
3821 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3822 u64 sas_address, struct hba_port *port)
3824 struct MPT3SAS_DEVICE *sas_device_priv_data;
3825 struct scsi_device *sdev;
3827 shost_for_each_device(sdev, ioc->shost) {
3828 sas_device_priv_data = sdev->hostdata;
3829 if (!sas_device_priv_data)
3831 if (sas_device_priv_data->sas_target->sas_address
3834 if (sas_device_priv_data->sas_target->port != port)
3836 if (sas_device_priv_data->block)
3837 _scsih_internal_device_unblock(sdev,
3838 sas_device_priv_data);
3843 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3844 * @ioc: per adapter object
3846 * During device pull we need to appropriately set the sdev state.
3849 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3851 struct MPT3SAS_DEVICE *sas_device_priv_data;
3852 struct scsi_device *sdev;
3854 shost_for_each_device(sdev, ioc->shost) {
3855 sas_device_priv_data = sdev->hostdata;
3856 if (!sas_device_priv_data)
3858 if (sas_device_priv_data->block)
3860 if (sas_device_priv_data->ignore_delay_remove) {
3861 sdev_printk(KERN_INFO, sdev,
3862 "%s skip device_block for SES handle(0x%04x)\n",
3863 __func__, sas_device_priv_data->sas_target->handle);
3866 _scsih_internal_device_block(sdev, sas_device_priv_data);
3871 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3872 * @ioc: per adapter object
3873 * @handle: device handle
3875 * During device pull we need to appropriately set the sdev state.
3878 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3880 struct MPT3SAS_DEVICE *sas_device_priv_data;
3881 struct scsi_device *sdev;
3882 struct _sas_device *sas_device;
3884 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3886 shost_for_each_device(sdev, ioc->shost) {
3887 sas_device_priv_data = sdev->hostdata;
3888 if (!sas_device_priv_data)
3890 if (sas_device_priv_data->sas_target->handle != handle)
3892 if (sas_device_priv_data->block)
3894 if (sas_device && sas_device->pend_sas_rphy_add)
3896 if (sas_device_priv_data->ignore_delay_remove) {
3897 sdev_printk(KERN_INFO, sdev,
3898 "%s skip device_block for SES handle(0x%04x)\n",
3899 __func__, sas_device_priv_data->sas_target->handle);
3902 _scsih_internal_device_block(sdev, sas_device_priv_data);
3906 sas_device_put(sas_device);
3910 * _scsih_block_io_to_children_attached_to_ex
3911 * @ioc: per adapter object
3912 * @sas_expander: the sas_device object
3914 * This routine set sdev state to SDEV_BLOCK for all devices
3915 * attached to this expander. This function called when expander is
3919 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3920 struct _sas_node *sas_expander)
3922 struct _sas_port *mpt3sas_port;
3923 struct _sas_device *sas_device;
3924 struct _sas_node *expander_sibling;
3925 unsigned long flags;
3930 list_for_each_entry(mpt3sas_port,
3931 &sas_expander->sas_port_list, port_list) {
3932 if (mpt3sas_port->remote_identify.device_type ==
3934 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3935 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3936 mpt3sas_port->remote_identify.sas_address,
3937 mpt3sas_port->hba_port);
3939 set_bit(sas_device->handle,
3940 ioc->blocking_handles);
3941 sas_device_put(sas_device);
3943 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3947 list_for_each_entry(mpt3sas_port,
3948 &sas_expander->sas_port_list, port_list) {
3950 if (mpt3sas_port->remote_identify.device_type ==
3951 SAS_EDGE_EXPANDER_DEVICE ||
3952 mpt3sas_port->remote_identify.device_type ==
3953 SAS_FANOUT_EXPANDER_DEVICE) {
3955 mpt3sas_scsih_expander_find_by_sas_address(
3956 ioc, mpt3sas_port->remote_identify.sas_address,
3957 mpt3sas_port->hba_port);
3958 _scsih_block_io_to_children_attached_to_ex(ioc,
3965 * _scsih_block_io_to_children_attached_directly
3966 * @ioc: per adapter object
3967 * @event_data: topology change event data
3969 * This routine set sdev state to SDEV_BLOCK for all devices
3970 * direct attached during device pull.
3973 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3974 Mpi2EventDataSasTopologyChangeList_t *event_data)
3980 for (i = 0; i < event_data->NumEntries; i++) {
3981 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3984 reason_code = event_data->PHY[i].PhyStatus &
3985 MPI2_EVENT_SAS_TOPO_RC_MASK;
3986 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3987 _scsih_block_io_device(ioc, handle);
3992 * _scsih_block_io_to_pcie_children_attached_directly
3993 * @ioc: per adapter object
3994 * @event_data: topology change event data
3996 * This routine set sdev state to SDEV_BLOCK for all devices
3997 * direct attached during device pull/reconnect.
4000 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4001 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4007 for (i = 0; i < event_data->NumEntries; i++) {
4009 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4012 reason_code = event_data->PortEntry[i].PortStatus;
4014 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4015 _scsih_block_io_device(ioc, handle);
4019 * _scsih_tm_tr_send - send task management request
4020 * @ioc: per adapter object
4021 * @handle: device handle
4022 * Context: interrupt time.
4024 * This code is to initiate the device removal handshake protocol
4025 * with controller firmware. This function will issue target reset
4026 * using high priority request queue. It will send a sas iounit
4027 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4029 * This is designed to send muliple task management request at the same
4030 * time to the fifo. If the fifo is full, we will append the request,
4031 * and process it in a future completion.
4034 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4036 Mpi2SCSITaskManagementRequest_t *mpi_request;
4038 struct _sas_device *sas_device = NULL;
4039 struct _pcie_device *pcie_device = NULL;
4040 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4041 u64 sas_address = 0;
4042 unsigned long flags;
4043 struct _tr_list *delayed_tr;
4046 struct hba_port *port = NULL;
4048 if (ioc->pci_error_recovery) {
4050 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4054 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4055 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4057 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4062 /* if PD, then return */
4063 if (test_bit(handle, ioc->pd_handles))
4066 clear_bit(handle, ioc->pend_os_device_add);
4068 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4069 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4070 if (sas_device && sas_device->starget &&
4071 sas_device->starget->hostdata) {
4072 sas_target_priv_data = sas_device->starget->hostdata;
4073 sas_target_priv_data->deleted = 1;
4074 sas_address = sas_device->sas_address;
4075 port = sas_device->port;
4077 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4079 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4080 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4081 if (pcie_device && pcie_device->starget &&
4082 pcie_device->starget->hostdata) {
4083 sas_target_priv_data = pcie_device->starget->hostdata;
4084 sas_target_priv_data->deleted = 1;
4085 sas_address = pcie_device->wwid;
4087 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4088 if (pcie_device && (!ioc->tm_custom_handling) &&
4089 (!(mpt3sas_scsih_is_pcie_scsi_device(
4090 pcie_device->device_info))))
4092 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4094 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4096 if (sas_target_priv_data) {
4098 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4099 handle, (u64)sas_address));
4101 if (sas_device->enclosure_handle != 0)
4103 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4104 (u64)sas_device->enclosure_logical_id,
4106 if (sas_device->connector_name[0] != '\0')
4108 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4109 sas_device->enclosure_level,
4110 sas_device->connector_name));
4111 } else if (pcie_device) {
4112 if (pcie_device->enclosure_handle != 0)
4114 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4115 (u64)pcie_device->enclosure_logical_id,
4116 pcie_device->slot));
4117 if (pcie_device->connector_name[0] != '\0')
4119 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4120 pcie_device->enclosure_level,
4121 pcie_device->connector_name));
4123 _scsih_ublock_io_device(ioc, sas_address, port);
4124 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4127 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4129 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4132 INIT_LIST_HEAD(&delayed_tr->list);
4133 delayed_tr->handle = handle;
4134 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4136 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4142 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4143 handle, smid, ioc->tm_tr_cb_idx));
4144 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4145 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4146 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4147 mpi_request->DevHandle = cpu_to_le16(handle);
4148 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4149 mpi_request->MsgFlags = tr_method;
4150 set_bit(handle, ioc->device_remove_in_progress);
4151 ioc->put_smid_hi_priority(ioc, smid, 0);
4152 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4156 sas_device_put(sas_device);
4158 pcie_device_put(pcie_device);
4162 * _scsih_tm_tr_complete -
4163 * @ioc: per adapter object
4164 * @smid: system request message index
4165 * @msix_index: MSIX table index supplied by the OS
4166 * @reply: reply message frame(lower 32bit addr)
4167 * Context: interrupt time.
4169 * This is the target reset completion routine.
4170 * This code is part of the code to initiate the device removal
4171 * handshake protocol with controller firmware.
4172 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4174 * Return: 1 meaning mf should be freed from _base_interrupt
4175 * 0 means the mf is freed from this function.
4178 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4182 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4183 Mpi2SCSITaskManagementReply_t *mpi_reply =
4184 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4185 Mpi2SasIoUnitControlRequest_t *mpi_request;
4188 struct _sc_list *delayed_sc;
4190 if (ioc->pci_error_recovery) {
4192 ioc_info(ioc, "%s: host in pci error recovery\n",
4196 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4197 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4199 ioc_info(ioc, "%s: host is not operational\n",
4203 if (unlikely(!mpi_reply)) {
4204 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4205 __FILE__, __LINE__, __func__);
4208 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4209 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4210 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4212 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4214 le16_to_cpu(mpi_reply->DevHandle), smid));
4218 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4220 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4221 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4222 le32_to_cpu(mpi_reply->IOCLogInfo),
4223 le32_to_cpu(mpi_reply->TerminationCount)));
4225 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4226 if (!smid_sas_ctrl) {
4227 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4229 return _scsih_check_for_pending_tm(ioc, smid);
4230 INIT_LIST_HEAD(&delayed_sc->list);
4231 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4232 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4234 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4236 return _scsih_check_for_pending_tm(ioc, smid);
4240 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4241 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4242 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4243 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4244 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4245 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4246 mpi_request->DevHandle = mpi_request_tm->DevHandle;
4247 ioc->put_smid_default(ioc, smid_sas_ctrl);
4249 return _scsih_check_for_pending_tm(ioc, smid);
4252 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4253 * issue to IOC or not.
4254 * @ioc: per adapter object
4255 * @scmd: pointer to scsi command object
4257 * Returns true if scmd can be issued to IOC otherwise returns false.
4259 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4260 struct scsi_cmnd *scmd)
4263 if (ioc->pci_error_recovery)
4266 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4267 if (ioc->remove_host)
4273 if (ioc->remove_host) {
4275 switch (scmd->cmnd[0]) {
4276 case SYNCHRONIZE_CACHE:
4288 * _scsih_sas_control_complete - completion routine
4289 * @ioc: per adapter object
4290 * @smid: system request message index
4291 * @msix_index: MSIX table index supplied by the OS
4292 * @reply: reply message frame(lower 32bit addr)
4293 * Context: interrupt time.
4295 * This is the sas iounit control completion routine.
4296 * This code is part of the code to initiate the device removal
4297 * handshake protocol with controller firmware.
4299 * Return: 1 meaning mf should be freed from _base_interrupt
4300 * 0 means the mf is freed from this function.
4303 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4304 u8 msix_index, u32 reply)
4306 Mpi2SasIoUnitControlReply_t *mpi_reply =
4307 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4309 if (likely(mpi_reply)) {
4311 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4312 le16_to_cpu(mpi_reply->DevHandle), smid,
4313 le16_to_cpu(mpi_reply->IOCStatus),
4314 le32_to_cpu(mpi_reply->IOCLogInfo)));
4315 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4316 MPI2_IOCSTATUS_SUCCESS) {
4317 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4318 ioc->device_remove_in_progress);
4321 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4322 __FILE__, __LINE__, __func__);
4324 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4328 * _scsih_tm_tr_volume_send - send target reset request for volumes
4329 * @ioc: per adapter object
4330 * @handle: device handle
4331 * Context: interrupt time.
4333 * This is designed to send muliple task management request at the same
4334 * time to the fifo. If the fifo is full, we will append the request,
4335 * and process it in a future completion.
4338 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4340 Mpi2SCSITaskManagementRequest_t *mpi_request;
4342 struct _tr_list *delayed_tr;
4344 if (ioc->pci_error_recovery) {
4346 ioc_info(ioc, "%s: host reset in progress!\n",
4351 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4353 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4356 INIT_LIST_HEAD(&delayed_tr->list);
4357 delayed_tr->handle = handle;
4358 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4360 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4366 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4367 handle, smid, ioc->tm_tr_volume_cb_idx));
4368 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4369 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4370 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4371 mpi_request->DevHandle = cpu_to_le16(handle);
4372 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4373 ioc->put_smid_hi_priority(ioc, smid, 0);
4377 * _scsih_tm_volume_tr_complete - target reset completion
4378 * @ioc: per adapter object
4379 * @smid: system request message index
4380 * @msix_index: MSIX table index supplied by the OS
4381 * @reply: reply message frame(lower 32bit addr)
4382 * Context: interrupt time.
4384 * Return: 1 meaning mf should be freed from _base_interrupt
4385 * 0 means the mf is freed from this function.
4388 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4389 u8 msix_index, u32 reply)
4392 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4393 Mpi2SCSITaskManagementReply_t *mpi_reply =
4394 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4396 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4398 ioc_info(ioc, "%s: host reset in progress!\n",
4402 if (unlikely(!mpi_reply)) {
4403 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4404 __FILE__, __LINE__, __func__);
4408 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4409 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4410 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4412 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4413 handle, le16_to_cpu(mpi_reply->DevHandle),
4419 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4420 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4421 le32_to_cpu(mpi_reply->IOCLogInfo),
4422 le32_to_cpu(mpi_reply->TerminationCount)));
4424 return _scsih_check_for_pending_tm(ioc, smid);
4428 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4429 * @ioc: per adapter object
4430 * @smid: system request message index
4432 * @event_context: used to track events uniquely
4434 * Context - processed in interrupt context.
4437 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4440 Mpi2EventAckRequest_t *ack_request;
4441 int i = smid - ioc->internal_smid;
4442 unsigned long flags;
4444 /* Without releasing the smid just update the
4445 * call back index and reuse the same smid for
4446 * processing this delayed request
4448 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4449 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4450 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4453 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4454 le16_to_cpu(event), smid, ioc->base_cb_idx));
4455 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4456 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4457 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4458 ack_request->Event = event;
4459 ack_request->EventContext = event_context;
4460 ack_request->VF_ID = 0; /* TODO */
4461 ack_request->VP_ID = 0;
4462 ioc->put_smid_default(ioc, smid);
4466 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4467 * sas_io_unit_ctrl messages
4468 * @ioc: per adapter object
4469 * @smid: system request message index
4470 * @handle: device handle
4472 * Context - processed in interrupt context.
4475 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4476 u16 smid, u16 handle)
4478 Mpi2SasIoUnitControlRequest_t *mpi_request;
4480 int i = smid - ioc->internal_smid;
4481 unsigned long flags;
4483 if (ioc->remove_host) {
4485 ioc_info(ioc, "%s: host has been removed\n",
4488 } else if (ioc->pci_error_recovery) {
4490 ioc_info(ioc, "%s: host in pci error recovery\n",
4494 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4495 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4497 ioc_info(ioc, "%s: host is not operational\n",
4502 /* Without releasing the smid just update the
4503 * call back index and reuse the same smid for
4504 * processing this delayed request
4506 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4507 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4508 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4511 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4512 handle, smid, ioc->tm_sas_control_cb_idx));
4513 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4514 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4515 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4516 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4517 mpi_request->DevHandle = cpu_to_le16(handle);
4518 ioc->put_smid_default(ioc, smid);
4522 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4523 * @ioc: per adapter object
4524 * @smid: system request message index
4526 * Context: Executed in interrupt context
4528 * This will check delayed internal messages list, and process the
4531 * Return: 1 meaning mf should be freed from _base_interrupt
4532 * 0 means the mf is freed from this function.
4535 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4537 struct _sc_list *delayed_sc;
4538 struct _event_ack_list *delayed_event_ack;
4540 if (!list_empty(&ioc->delayed_event_ack_list)) {
4541 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4542 struct _event_ack_list, list);
4543 _scsih_issue_delayed_event_ack(ioc, smid,
4544 delayed_event_ack->Event, delayed_event_ack->EventContext);
4545 list_del(&delayed_event_ack->list);
4546 kfree(delayed_event_ack);
4550 if (!list_empty(&ioc->delayed_sc_list)) {
4551 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4552 struct _sc_list, list);
4553 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4554 delayed_sc->handle);
4555 list_del(&delayed_sc->list);
4563 * _scsih_check_for_pending_tm - check for pending task management
4564 * @ioc: per adapter object
4565 * @smid: system request message index
4567 * This will check delayed target reset list, and feed the
4570 * Return: 1 meaning mf should be freed from _base_interrupt
4571 * 0 means the mf is freed from this function.
4574 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4576 struct _tr_list *delayed_tr;
4578 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4579 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4580 struct _tr_list, list);
4581 mpt3sas_base_free_smid(ioc, smid);
4582 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4583 list_del(&delayed_tr->list);
4588 if (!list_empty(&ioc->delayed_tr_list)) {
4589 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4590 struct _tr_list, list);
4591 mpt3sas_base_free_smid(ioc, smid);
4592 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4593 list_del(&delayed_tr->list);
4602 * _scsih_check_topo_delete_events - sanity check on topo events
4603 * @ioc: per adapter object
4604 * @event_data: the event data payload
4606 * This routine added to better handle cable breaker.
4608 * This handles the case where driver receives multiple expander
4609 * add and delete events in a single shot. When there is a delete event
4610 * the routine will void any pending add events waiting in the event queue.
4613 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4614 Mpi2EventDataSasTopologyChangeList_t *event_data)
4616 struct fw_event_work *fw_event;
4617 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4618 u16 expander_handle;
4619 struct _sas_node *sas_expander;
4620 unsigned long flags;
4624 for (i = 0 ; i < event_data->NumEntries; i++) {
4625 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4628 reason_code = event_data->PHY[i].PhyStatus &
4629 MPI2_EVENT_SAS_TOPO_RC_MASK;
4630 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4631 _scsih_tm_tr_send(ioc, handle);
4634 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4635 if (expander_handle < ioc->sas_hba.num_phys) {
4636 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4639 if (event_data->ExpStatus ==
4640 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4641 /* put expander attached devices into blocking state */
4642 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4643 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4645 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4646 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4648 handle = find_first_bit(ioc->blocking_handles,
4649 ioc->facts.MaxDevHandle);
4650 if (handle < ioc->facts.MaxDevHandle)
4651 _scsih_block_io_device(ioc, handle);
4652 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4653 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4654 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4656 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4659 /* mark ignore flag for pending events */
4660 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4661 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4662 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4665 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4666 fw_event->event_data;
4667 if (local_event_data->ExpStatus ==
4668 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4669 local_event_data->ExpStatus ==
4670 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4671 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4674 ioc_info(ioc, "setting ignoring flag\n"));
4675 fw_event->ignore = 1;
4679 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4683 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4685 * @ioc: per adapter object
4686 * @event_data: the event data payload
4688 * This handles the case where driver receives multiple switch
4689 * or device add and delete events in a single shot. When there
4690 * is a delete event the routine will void any pending add
4691 * events waiting in the event queue.
4694 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4695 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4697 struct fw_event_work *fw_event;
4698 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4699 unsigned long flags;
4701 u16 handle, switch_handle;
4703 for (i = 0; i < event_data->NumEntries; i++) {
4705 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4708 reason_code = event_data->PortEntry[i].PortStatus;
4709 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4710 _scsih_tm_tr_send(ioc, handle);
4713 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4714 if (!switch_handle) {
4715 _scsih_block_io_to_pcie_children_attached_directly(
4719 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4720 if ((event_data->SwitchStatus
4721 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4722 (event_data->SwitchStatus ==
4723 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4724 _scsih_block_io_to_pcie_children_attached_directly(
4727 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4730 /* mark ignore flag for pending events */
4731 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4732 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4733 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4737 (Mpi26EventDataPCIeTopologyChangeList_t *)
4738 fw_event->event_data;
4739 if (local_event_data->SwitchStatus ==
4740 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4741 local_event_data->SwitchStatus ==
4742 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4743 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4746 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4747 fw_event->ignore = 1;
4751 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4755 * _scsih_set_volume_delete_flag - setting volume delete flag
4756 * @ioc: per adapter object
4757 * @handle: device handle
4759 * This returns nothing.
4762 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4764 struct _raid_device *raid_device;
4765 struct MPT3SAS_TARGET *sas_target_priv_data;
4766 unsigned long flags;
4768 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4769 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4770 if (raid_device && raid_device->starget &&
4771 raid_device->starget->hostdata) {
4772 sas_target_priv_data =
4773 raid_device->starget->hostdata;
4774 sas_target_priv_data->deleted = 1;
4776 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4777 handle, (u64)raid_device->wwid));
4779 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4783 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4784 * @handle: input handle
4785 * @a: handle for volume a
4786 * @b: handle for volume b
4788 * IR firmware only supports two raid volumes. The purpose of this
4789 * routine is to set the volume handle in either a or b. When the given
4790 * input handle is non-zero, or when a and b have not been set before.
4793 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4795 if (!handle || handle == *a || handle == *b)
4804 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4805 * @ioc: per adapter object
4806 * @event_data: the event data payload
4807 * Context: interrupt time.
4809 * This routine will send target reset to volume, followed by target
4810 * resets to the PDs. This is called when a PD has been removed, or
4811 * volume has been deleted or removed. When the target reset is sent
4812 * to volume, the PD target resets need to be queued to start upon
4813 * completion of the volume target reset.
4816 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4817 Mpi2EventDataIrConfigChangeList_t *event_data)
4819 Mpi2EventIrConfigElement_t *element;
4821 u16 handle, volume_handle, a, b;
4822 struct _tr_list *delayed_tr;
4827 if (ioc->is_warpdrive)
4830 /* Volume Resets for Deleted or Removed */
4831 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4832 for (i = 0; i < event_data->NumElements; i++, element++) {
4833 if (le32_to_cpu(event_data->Flags) &
4834 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4836 if (element->ReasonCode ==
4837 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4838 element->ReasonCode ==
4839 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4840 volume_handle = le16_to_cpu(element->VolDevHandle);
4841 _scsih_set_volume_delete_flag(ioc, volume_handle);
4842 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4846 /* Volume Resets for UNHIDE events */
4847 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4848 for (i = 0; i < event_data->NumElements; i++, element++) {
4849 if (le32_to_cpu(event_data->Flags) &
4850 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4852 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4853 volume_handle = le16_to_cpu(element->VolDevHandle);
4854 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4859 _scsih_tm_tr_volume_send(ioc, a);
4861 _scsih_tm_tr_volume_send(ioc, b);
4863 /* PD target resets */
4864 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4865 for (i = 0; i < event_data->NumElements; i++, element++) {
4866 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4868 handle = le16_to_cpu(element->PhysDiskDevHandle);
4869 volume_handle = le16_to_cpu(element->VolDevHandle);
4870 clear_bit(handle, ioc->pd_handles);
4872 _scsih_tm_tr_send(ioc, handle);
4873 else if (volume_handle == a || volume_handle == b) {
4874 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4875 BUG_ON(!delayed_tr);
4876 INIT_LIST_HEAD(&delayed_tr->list);
4877 delayed_tr->handle = handle;
4878 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4880 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4883 _scsih_tm_tr_send(ioc, handle);
4889 * _scsih_check_volume_delete_events - set delete flag for volumes
4890 * @ioc: per adapter object
4891 * @event_data: the event data payload
4892 * Context: interrupt time.
4894 * This will handle the case when the cable connected to entire volume is
4895 * pulled. We will take care of setting the deleted flag so normal IO will
4899 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4900 Mpi2EventDataIrVolume_t *event_data)
4904 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4906 state = le32_to_cpu(event_data->NewValue);
4907 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4908 MPI2_RAID_VOL_STATE_FAILED)
4909 _scsih_set_volume_delete_flag(ioc,
4910 le16_to_cpu(event_data->VolDevHandle));
4914 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4915 * @ioc: per adapter object
4916 * @event_data: the temp threshold event data
4917 * Context: interrupt time.
4920 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4921 Mpi2EventDataTemperature_t *event_data)
4924 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4925 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4926 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4927 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4928 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4929 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4930 event_data->SensorNum);
4931 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4932 event_data->CurrentTemperature);
4933 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4934 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4935 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4936 MPI2_IOC_STATE_FAULT) {
4937 mpt3sas_print_fault_code(ioc,
4938 doorbell & MPI2_DOORBELL_DATA_MASK);
4939 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4940 MPI2_IOC_STATE_COREDUMP) {
4941 mpt3sas_print_coredump_info(ioc,
4942 doorbell & MPI2_DOORBELL_DATA_MASK);
4948 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4950 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4952 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4956 return test_and_set_bit(0, &priv->ata_command_pending);
4958 clear_bit(0, &priv->ata_command_pending);
4963 * _scsih_flush_running_cmds - completing outstanding commands.
4964 * @ioc: per adapter object
4966 * The flushing out of all pending scmd commands following host reset,
4967 * where all IO is dropped to the floor.
4970 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4972 struct scsi_cmnd *scmd;
4973 struct scsiio_tracker *st;
4977 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4978 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4982 _scsih_set_satl_pending(scmd, false);
4983 st = scsi_cmd_priv(scmd);
4984 mpt3sas_base_clear_st(ioc, st);
4985 scsi_dma_unmap(scmd);
4986 if (ioc->pci_error_recovery || ioc->remove_host)
4987 scmd->result = DID_NO_CONNECT << 16;
4989 scmd->result = DID_RESET << 16;
4990 scmd->scsi_done(scmd);
4992 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4996 * _scsih_setup_eedp - setup MPI request for EEDP transfer
4997 * @ioc: per adapter object
4998 * @scmd: pointer to scsi command object
4999 * @mpi_request: pointer to the SCSI_IO request message frame
5001 * Supporting protection 1 and 3.
5004 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5005 Mpi25SCSIIORequest_t *mpi_request)
5008 unsigned char prot_op = scsi_get_prot_op(scmd);
5009 unsigned char prot_type = scsi_get_prot_type(scmd);
5010 Mpi25SCSIIORequest_t *mpi_request_3v =
5011 (Mpi25SCSIIORequest_t *)mpi_request;
5013 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
5016 if (prot_op == SCSI_PROT_READ_STRIP)
5017 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5018 else if (prot_op == SCSI_PROT_WRITE_INSERT)
5019 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5023 switch (prot_type) {
5024 case SCSI_PROT_DIF_TYPE1:
5025 case SCSI_PROT_DIF_TYPE2:
5028 * enable ref/guard checking
5029 * auto increment ref tag
5031 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
5032 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
5033 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5034 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5035 cpu_to_be32(t10_pi_ref_tag(scmd->request));
5038 case SCSI_PROT_DIF_TYPE3:
5041 * enable guard checking
5043 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5048 mpi_request_3v->EEDPBlockSize =
5049 cpu_to_le16(scmd->device->sector_size);
5051 if (ioc->is_gen35_ioc)
5052 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5053 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5057 * _scsih_eedp_error_handling - return sense code for EEDP errors
5058 * @scmd: pointer to scsi command object
5059 * @ioc_status: ioc status
5062 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5066 switch (ioc_status) {
5067 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5070 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5073 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5080 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
5082 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
5083 SAM_STAT_CHECK_CONDITION;
5087 * scsih_qcmd - main scsi request entry point
5088 * @shost: SCSI host pointer
5089 * @scmd: pointer to scsi command object
5091 * The callback index is set inside `ioc->scsi_io_cb_idx`.
5093 * Return: 0 on success. If there's a failure, return either:
5094 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5095 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5098 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5100 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5101 struct MPT3SAS_DEVICE *sas_device_priv_data;
5102 struct MPT3SAS_TARGET *sas_target_priv_data;
5103 struct _raid_device *raid_device;
5104 struct request *rq = scmd->request;
5106 Mpi25SCSIIORequest_t *mpi_request;
5107 struct _pcie_device *pcie_device = NULL;
5112 if (ioc->logging_level & MPT_DEBUG_SCSI)
5113 scsi_print_command(scmd);
5115 sas_device_priv_data = scmd->device->hostdata;
5116 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5117 scmd->result = DID_NO_CONNECT << 16;
5118 scmd->scsi_done(scmd);
5122 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5123 scmd->result = DID_NO_CONNECT << 16;
5124 scmd->scsi_done(scmd);
5128 sas_target_priv_data = sas_device_priv_data->sas_target;
5130 /* invalid device handle */
5131 handle = sas_target_priv_data->handle;
5132 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5133 scmd->result = DID_NO_CONNECT << 16;
5134 scmd->scsi_done(scmd);
5139 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5140 /* host recovery or link resets sent via IOCTLs */
5141 return SCSI_MLQUEUE_HOST_BUSY;
5142 } else if (sas_target_priv_data->deleted) {
5143 /* device has been deleted */
5144 scmd->result = DID_NO_CONNECT << 16;
5145 scmd->scsi_done(scmd);
5147 } else if (sas_target_priv_data->tm_busy ||
5148 sas_device_priv_data->block) {
5149 /* device busy with task management */
5150 return SCSI_MLQUEUE_DEVICE_BUSY;
5154 * Bug work around for firmware SATL handling. The loop
5155 * is based on atomic operations and ensures consistency
5156 * since we're lockless at this point
5159 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5160 return SCSI_MLQUEUE_DEVICE_BUSY;
5161 } while (_scsih_set_satl_pending(scmd, true));
5163 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5164 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5165 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5166 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5168 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5171 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5172 /* NCQ Prio supported, make sure control indicated high priority */
5173 if (sas_device_priv_data->ncq_prio_enable) {
5174 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5175 if (class == IOPRIO_CLASS_RT)
5176 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5178 /* Make sure Device is not raid volume.
5179 * We do not expose raid functionality to upper layer for warpdrive.
5181 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5182 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5183 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5184 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5186 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5188 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5189 _scsih_set_satl_pending(scmd, false);
5192 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5193 memset(mpi_request, 0, ioc->request_sz);
5194 _scsih_setup_eedp(ioc, scmd, mpi_request);
5196 if (scmd->cmd_len == 32)
5197 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5198 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5199 if (sas_device_priv_data->sas_target->flags &
5200 MPT_TARGET_FLAGS_RAID_COMPONENT)
5201 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5203 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5204 mpi_request->DevHandle = cpu_to_le16(handle);
5205 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5206 mpi_request->Control = cpu_to_le32(mpi_control);
5207 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5208 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5209 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5210 mpi_request->SenseBufferLowAddress =
5211 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5212 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5213 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5215 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5217 if (mpi_request->DataLength) {
5218 pcie_device = sas_target_priv_data->pcie_dev;
5219 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5220 mpt3sas_base_free_smid(ioc, smid);
5221 _scsih_set_satl_pending(scmd, false);
5225 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5227 raid_device = sas_target_priv_data->raid_device;
5228 if (raid_device && raid_device->direct_io_enabled)
5229 mpt3sas_setup_direct_io(ioc, scmd,
5230 raid_device, mpi_request);
5232 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5233 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5234 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5235 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5236 ioc->put_smid_fast_path(ioc, smid, handle);
5238 ioc->put_smid_scsi_io(ioc, smid,
5239 le16_to_cpu(mpi_request->DevHandle));
5241 ioc->put_smid_default(ioc, smid);
5245 return SCSI_MLQUEUE_HOST_BUSY;
5249 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5250 * @sense_buffer: sense data returned by target
5251 * @data: normalized skey/asc/ascq
5254 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5256 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5257 /* descriptor format */
5258 data->skey = sense_buffer[1] & 0x0F;
5259 data->asc = sense_buffer[2];
5260 data->ascq = sense_buffer[3];
5263 data->skey = sense_buffer[2] & 0x0F;
5264 data->asc = sense_buffer[12];
5265 data->ascq = sense_buffer[13];
5270 * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
5271 * @ioc: per adapter object
5272 * @scmd: pointer to scsi command object
5273 * @mpi_reply: reply mf payload returned from firmware
5276 * scsi_status - SCSI Status code returned from target device
5277 * scsi_state - state info associated with SCSI_IO determined by ioc
5278 * ioc_status - ioc supplied status info
5281 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5282 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5286 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5287 MPI2_IOCSTATUS_MASK;
5288 u8 scsi_state = mpi_reply->SCSIState;
5289 u8 scsi_status = mpi_reply->SCSIStatus;
5290 char *desc_ioc_state = NULL;
5291 char *desc_scsi_status = NULL;
5292 char *desc_scsi_state = ioc->tmp_string;
5293 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5294 struct _sas_device *sas_device = NULL;
5295 struct _pcie_device *pcie_device = NULL;
5296 struct scsi_target *starget = scmd->device->sdev_target;
5297 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5298 char *device_str = NULL;
5302 if (ioc->hide_ir_msg)
5303 device_str = "WarpDrive";
5305 device_str = "volume";
5307 if (log_info == 0x31170000)
5310 switch (ioc_status) {
5311 case MPI2_IOCSTATUS_SUCCESS:
5312 desc_ioc_state = "success";
5314 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5315 desc_ioc_state = "invalid function";
5317 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5318 desc_ioc_state = "scsi recovered error";
5320 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5321 desc_ioc_state = "scsi invalid dev handle";
5323 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5324 desc_ioc_state = "scsi device not there";
5326 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5327 desc_ioc_state = "scsi data overrun";
5329 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5330 desc_ioc_state = "scsi data underrun";
5332 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5333 desc_ioc_state = "scsi io data error";
5335 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5336 desc_ioc_state = "scsi protocol error";
5338 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5339 desc_ioc_state = "scsi task terminated";
5341 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5342 desc_ioc_state = "scsi residual mismatch";
5344 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5345 desc_ioc_state = "scsi task mgmt failed";
5347 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5348 desc_ioc_state = "scsi ioc terminated";
5350 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5351 desc_ioc_state = "scsi ext terminated";
5353 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5354 desc_ioc_state = "eedp guard error";
5356 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5357 desc_ioc_state = "eedp ref tag error";
5359 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5360 desc_ioc_state = "eedp app tag error";
5362 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5363 desc_ioc_state = "insufficient power";
5366 desc_ioc_state = "unknown";
5370 switch (scsi_status) {
5371 case MPI2_SCSI_STATUS_GOOD:
5372 desc_scsi_status = "good";
5374 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5375 desc_scsi_status = "check condition";
5377 case MPI2_SCSI_STATUS_CONDITION_MET:
5378 desc_scsi_status = "condition met";
5380 case MPI2_SCSI_STATUS_BUSY:
5381 desc_scsi_status = "busy";
5383 case MPI2_SCSI_STATUS_INTERMEDIATE:
5384 desc_scsi_status = "intermediate";
5386 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5387 desc_scsi_status = "intermediate condmet";
5389 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5390 desc_scsi_status = "reservation conflict";
5392 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5393 desc_scsi_status = "command terminated";
5395 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5396 desc_scsi_status = "task set full";
5398 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5399 desc_scsi_status = "aca active";
5401 case MPI2_SCSI_STATUS_TASK_ABORTED:
5402 desc_scsi_status = "task aborted";
5405 desc_scsi_status = "unknown";
5409 desc_scsi_state[0] = '\0';
5411 desc_scsi_state = " ";
5412 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5413 strcat(desc_scsi_state, "response info ");
5414 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5415 strcat(desc_scsi_state, "state terminated ");
5416 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5417 strcat(desc_scsi_state, "no status ");
5418 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5419 strcat(desc_scsi_state, "autosense failed ");
5420 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5421 strcat(desc_scsi_state, "autosense valid ");
5423 scsi_print_command(scmd);
5425 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5426 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5427 device_str, (u64)priv_target->sas_address);
5428 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5429 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5431 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5432 (u64)pcie_device->wwid, pcie_device->port_num);
5433 if (pcie_device->enclosure_handle != 0)
5434 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5435 (u64)pcie_device->enclosure_logical_id,
5437 if (pcie_device->connector_name[0])
5438 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5439 pcie_device->enclosure_level,
5440 pcie_device->connector_name);
5441 pcie_device_put(pcie_device);
5444 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5446 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5447 (u64)sas_device->sas_address, sas_device->phy);
5449 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5452 sas_device_put(sas_device);
5456 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5457 le16_to_cpu(mpi_reply->DevHandle),
5458 desc_ioc_state, ioc_status, smid);
5459 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5460 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5461 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5462 le16_to_cpu(mpi_reply->TaskTag),
5463 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5464 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5465 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5467 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5468 struct sense_info data;
5469 _scsih_normalize_sense(scmd->sense_buffer, &data);
5470 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5471 data.skey, data.asc, data.ascq,
5472 le32_to_cpu(mpi_reply->SenseCount));
5474 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5475 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5476 response_bytes = (u8 *)&response_info;
5477 _scsih_response_code(ioc, response_bytes[0]);
5482 * _scsih_turn_on_pfa_led - illuminate PFA LED
5483 * @ioc: per adapter object
5484 * @handle: device handle
5488 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5490 Mpi2SepReply_t mpi_reply;
5491 Mpi2SepRequest_t mpi_request;
5492 struct _sas_device *sas_device;
5494 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5498 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5499 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5500 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5501 mpi_request.SlotStatus =
5502 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5503 mpi_request.DevHandle = cpu_to_le16(handle);
5504 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5505 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5506 &mpi_request)) != 0) {
5507 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5508 __FILE__, __LINE__, __func__);
5511 sas_device->pfa_led_on = 1;
5513 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5515 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5516 le16_to_cpu(mpi_reply.IOCStatus),
5517 le32_to_cpu(mpi_reply.IOCLogInfo)));
5521 sas_device_put(sas_device);
5525 * _scsih_turn_off_pfa_led - turn off Fault LED
5526 * @ioc: per adapter object
5527 * @sas_device: sas device whose PFA LED has to turned off
5531 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5532 struct _sas_device *sas_device)
5534 Mpi2SepReply_t mpi_reply;
5535 Mpi2SepRequest_t mpi_request;
5537 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5538 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5539 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5540 mpi_request.SlotStatus = 0;
5541 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5542 mpi_request.DevHandle = 0;
5543 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5544 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5545 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5546 &mpi_request)) != 0) {
5547 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5548 __FILE__, __LINE__, __func__);
5552 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5554 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5555 le16_to_cpu(mpi_reply.IOCStatus),
5556 le32_to_cpu(mpi_reply.IOCLogInfo)));
5562 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5563 * @ioc: per adapter object
5564 * @handle: device handle
5565 * Context: interrupt.
5568 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5570 struct fw_event_work *fw_event;
5572 fw_event = alloc_fw_event_work(0);
5575 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5576 fw_event->device_handle = handle;
5577 fw_event->ioc = ioc;
5578 _scsih_fw_event_add(ioc, fw_event);
5579 fw_event_work_put(fw_event);
5583 * _scsih_smart_predicted_fault - process smart errors
5584 * @ioc: per adapter object
5585 * @handle: device handle
5586 * Context: interrupt.
5589 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5591 struct scsi_target *starget;
5592 struct MPT3SAS_TARGET *sas_target_priv_data;
5593 Mpi2EventNotificationReply_t *event_reply;
5594 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5595 struct _sas_device *sas_device;
5597 unsigned long flags;
5599 /* only handle non-raid devices */
5600 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5601 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5605 starget = sas_device->starget;
5606 sas_target_priv_data = starget->hostdata;
5608 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5609 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5612 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5614 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5616 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5617 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5619 /* insert into event log */
5620 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5621 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5622 event_reply = kzalloc(sz, GFP_ATOMIC);
5624 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5625 __FILE__, __LINE__, __func__);
5629 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5630 event_reply->Event =
5631 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5632 event_reply->MsgLength = sz/4;
5633 event_reply->EventDataLength =
5634 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5635 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5636 event_reply->EventData;
5637 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5638 event_data->ASC = 0x5D;
5639 event_data->DevHandle = cpu_to_le16(handle);
5640 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5641 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5645 sas_device_put(sas_device);
5649 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5654 * _scsih_io_done - scsi request callback
5655 * @ioc: per adapter object
5656 * @smid: system request message index
5657 * @msix_index: MSIX table index supplied by the OS
5658 * @reply: reply message frame(lower 32bit addr)
5660 * Callback handler when using _scsih_qcmd.
5662 * Return: 1 meaning mf should be freed from _base_interrupt
5663 * 0 means the mf is freed from this function.
5666 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5668 Mpi25SCSIIORequest_t *mpi_request;
5669 Mpi2SCSIIOReply_t *mpi_reply;
5670 struct scsi_cmnd *scmd;
5671 struct scsiio_tracker *st;
5677 struct MPT3SAS_DEVICE *sas_device_priv_data;
5678 u32 response_code = 0;
5680 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5682 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5686 _scsih_set_satl_pending(scmd, false);
5688 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5690 if (mpi_reply == NULL) {
5691 scmd->result = DID_OK << 16;
5695 sas_device_priv_data = scmd->device->hostdata;
5696 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5697 sas_device_priv_data->sas_target->deleted) {
5698 scmd->result = DID_NO_CONNECT << 16;
5701 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5704 * WARPDRIVE: If direct_io is set then it is directIO,
5705 * the failed direct I/O should be redirected to volume
5707 st = scsi_cmd_priv(scmd);
5708 if (st->direct_io &&
5709 ((ioc_status & MPI2_IOCSTATUS_MASK)
5710 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5713 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5714 mpi_request->DevHandle =
5715 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5716 ioc->put_smid_scsi_io(ioc, smid,
5717 sas_device_priv_data->sas_target->handle);
5720 /* turning off TLR */
5721 scsi_state = mpi_reply->SCSIState;
5722 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5724 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5725 if (!sas_device_priv_data->tlr_snoop_check) {
5726 sas_device_priv_data->tlr_snoop_check++;
5727 if ((!ioc->is_warpdrive &&
5728 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5729 !scsih_is_nvme(&scmd->device->sdev_gendev))
5730 && sas_is_tlr_enabled(scmd->device) &&
5731 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5732 sas_disable_tlr(scmd->device);
5733 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5737 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5738 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5739 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5740 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5743 ioc_status &= MPI2_IOCSTATUS_MASK;
5744 scsi_status = mpi_reply->SCSIStatus;
5746 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5747 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5748 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5749 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5750 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5753 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5754 struct sense_info data;
5755 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5757 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5758 le32_to_cpu(mpi_reply->SenseCount));
5759 memcpy(scmd->sense_buffer, sense_data, sz);
5760 _scsih_normalize_sense(scmd->sense_buffer, &data);
5761 /* failure prediction threshold exceeded */
5762 if (data.asc == 0x5D)
5763 _scsih_smart_predicted_fault(ioc,
5764 le16_to_cpu(mpi_reply->DevHandle));
5765 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5767 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5768 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5769 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5770 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5771 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5773 switch (ioc_status) {
5774 case MPI2_IOCSTATUS_BUSY:
5775 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5776 scmd->result = SAM_STAT_BUSY;
5779 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5780 scmd->result = DID_NO_CONNECT << 16;
5783 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5784 if (sas_device_priv_data->block) {
5785 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5788 if (log_info == 0x31110630) {
5789 if (scmd->retries > 2) {
5790 scmd->result = DID_NO_CONNECT << 16;
5791 scsi_device_set_state(scmd->device,
5794 scmd->result = DID_SOFT_ERROR << 16;
5795 scmd->device->expecting_cc_ua = 1;
5798 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5799 scmd->result = DID_RESET << 16;
5801 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5802 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5803 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5804 scmd->result = DID_RESET << 16;
5807 scmd->result = DID_SOFT_ERROR << 16;
5809 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5810 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5811 scmd->result = DID_RESET << 16;
5814 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5815 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5816 scmd->result = DID_SOFT_ERROR << 16;
5818 scmd->result = (DID_OK << 16) | scsi_status;
5821 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5822 scmd->result = (DID_OK << 16) | scsi_status;
5824 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5827 if (xfer_cnt < scmd->underflow) {
5828 if (scsi_status == SAM_STAT_BUSY)
5829 scmd->result = SAM_STAT_BUSY;
5831 scmd->result = DID_SOFT_ERROR << 16;
5832 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5833 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5834 scmd->result = DID_SOFT_ERROR << 16;
5835 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5836 scmd->result = DID_RESET << 16;
5837 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5838 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5839 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5840 scmd->result = (DRIVER_SENSE << 24) |
5841 SAM_STAT_CHECK_CONDITION;
5842 scmd->sense_buffer[0] = 0x70;
5843 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5844 scmd->sense_buffer[12] = 0x20;
5845 scmd->sense_buffer[13] = 0;
5849 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5850 scsi_set_resid(scmd, 0);
5852 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5853 case MPI2_IOCSTATUS_SUCCESS:
5854 scmd->result = (DID_OK << 16) | scsi_status;
5855 if (response_code ==
5856 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5857 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5858 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5859 scmd->result = DID_SOFT_ERROR << 16;
5860 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5861 scmd->result = DID_RESET << 16;
5864 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5865 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5866 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5867 _scsih_eedp_error_handling(scmd, ioc_status);
5870 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5871 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5872 case MPI2_IOCSTATUS_INVALID_SGL:
5873 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5874 case MPI2_IOCSTATUS_INVALID_FIELD:
5875 case MPI2_IOCSTATUS_INVALID_STATE:
5876 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5877 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5878 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5880 scmd->result = DID_SOFT_ERROR << 16;
5885 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5886 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5890 scsi_dma_unmap(scmd);
5891 mpt3sas_base_free_smid(ioc, smid);
5892 scmd->scsi_done(scmd);
5897 * _scsih_update_vphys_after_reset - update the Port's
5898 * vphys_list after reset
5899 * @ioc: per adapter object
5904 _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5908 Mpi2ConfigReply_t mpi_reply;
5909 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5910 u16 attached_handle;
5911 u64 attached_sas_addr;
5912 u8 found = 0, port_id;
5913 Mpi2SasPhyPage0_t phy_pg0;
5914 struct hba_port *port, *port_next, *mport;
5915 struct virtual_phy *vphy, *vphy_next;
5916 struct _sas_device *sas_device;
5919 * Mark all the vphys objects as dirty.
5921 list_for_each_entry_safe(port, port_next,
5922 &ioc->port_table_list, list) {
5923 if (!port->vphys_mask)
5925 list_for_each_entry_safe(vphy, vphy_next,
5926 &port->vphys_list, list) {
5927 vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5932 * Read SASIOUnitPage0 to get each HBA Phy's data.
5934 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
5935 (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
5936 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5937 if (!sas_iounit_pg0) {
5938 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5939 __FILE__, __LINE__, __func__);
5942 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5943 sas_iounit_pg0, sz)) != 0)
5945 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5946 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5949 * Loop over each HBA Phy.
5951 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5953 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
5955 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5956 MPI2_SAS_NEG_LINK_RATE_1_5)
5959 * Check whether Phy is connected to SEP device or not,
5960 * if it is SEP device then read the Phy's SASPHYPage0 data to
5961 * determine whether Phy is a virtual Phy or not. if it is
5962 * virtual phy then it is conformed that the attached remote
5963 * device is a HBA's vSES device.
5966 sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
5967 MPI2_SAS_DEVICE_INFO_SEP))
5970 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5972 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5973 __FILE__, __LINE__, __func__);
5977 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
5978 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
5981 * Get the vSES device's SAS Address.
5983 attached_handle = le16_to_cpu(
5984 sas_iounit_pg0->PhyData[i].AttachedDevHandle);
5985 if (_scsih_get_sas_address(ioc, attached_handle,
5986 &attached_sas_addr) != 0) {
5987 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5988 __FILE__, __LINE__, __func__);
5993 port = port_next = NULL;
5995 * Loop over each virtual_phy object from
5996 * each port's vphys_list.
5998 list_for_each_entry_safe(port,
5999 port_next, &ioc->port_table_list, list) {
6000 if (!port->vphys_mask)
6002 list_for_each_entry_safe(vphy, vphy_next,
6003 &port->vphys_list, list) {
6005 * Continue with next virtual_phy object
6006 * if the object is not marked as dirty.
6008 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6012 * Continue with next virtual_phy object
6013 * if the object's SAS Address is not equals
6014 * to current Phy's vSES device SAS Address.
6016 if (vphy->sas_address != attached_sas_addr)
6019 * Enable current Phy number bit in object's
6022 if (!(vphy->phy_mask & (1 << i)))
6023 vphy->phy_mask = (1 << i);
6025 * Get hba_port object from hba_port table
6026 * corresponding to current phy's Port ID.
6027 * if there is no hba_port object corresponding
6028 * to Phy's Port ID then create a new hba_port
6029 * object & add to hba_port table.
6031 port_id = sas_iounit_pg0->PhyData[i].Port;
6032 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6035 sizeof(struct hba_port), GFP_KERNEL);
6038 mport->port_id = port_id;
6040 "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6041 __func__, mport, mport->port_id);
6042 list_add_tail(&mport->list,
6043 &ioc->port_table_list);
6046 * If mport & port pointers are not pointing to
6047 * same hba_port object then it means that vSES
6048 * device's Port ID got changed after reset and
6049 * hence move current virtual_phy object from
6050 * port's vphys_list to mport's vphys_list.
6052 if (port != mport) {
6053 if (!mport->vphys_mask)
6055 &mport->vphys_list);
6056 mport->vphys_mask |= (1 << i);
6057 port->vphys_mask &= ~(1 << i);
6058 list_move(&vphy->list,
6059 &mport->vphys_list);
6060 sas_device = mpt3sas_get_sdev_by_addr(
6061 ioc, attached_sas_addr, port);
6063 sas_device->port = mport;
6066 * Earlier while updating the hba_port table,
6067 * it is determined that there is no other
6068 * direct attached device with mport's Port ID,
6069 * Hence mport was marked as dirty. Only vSES
6070 * device has this Port ID, so unmark the mport
6073 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6074 mport->sas_address = 0;
6075 mport->phy_mask = 0;
6077 ~HBA_PORT_FLAG_DIRTY_PORT;
6080 * Unmark current virtual_phy object as dirty.
6082 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6091 kfree(sas_iounit_pg0);
6095 * _scsih_get_port_table_after_reset - Construct temporary port table
6096 * @ioc: per adapter object
6097 * @port_table: address where port table needs to be constructed
6099 * return number of HBA port entries available after reset.
6102 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6103 struct hba_port *port_table)
6107 Mpi2ConfigReply_t mpi_reply;
6108 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6109 u16 attached_handle;
6110 u64 attached_sas_addr;
6111 u8 found = 0, port_count = 0, port_id;
6113 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6114 * sizeof(Mpi2SasIOUnit0PhyData_t));
6115 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6116 if (!sas_iounit_pg0) {
6117 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6118 __FILE__, __LINE__, __func__);
6122 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6123 sas_iounit_pg0, sz)) != 0)
6125 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6126 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6128 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6130 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6131 MPI2_SAS_NEG_LINK_RATE_1_5)
6134 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6135 if (_scsih_get_sas_address(
6136 ioc, attached_handle, &attached_sas_addr) != 0) {
6137 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6138 __FILE__, __LINE__, __func__);
6142 for (j = 0; j < port_count; j++) {
6143 port_id = sas_iounit_pg0->PhyData[i].Port;
6144 if (port_table[j].port_id == port_id &&
6145 port_table[j].sas_address == attached_sas_addr) {
6146 port_table[j].phy_mask |= (1 << i);
6155 port_id = sas_iounit_pg0->PhyData[i].Port;
6156 port_table[port_count].port_id = port_id;
6157 port_table[port_count].phy_mask = (1 << i);
6158 port_table[port_count].sas_address = attached_sas_addr;
6162 kfree(sas_iounit_pg0);
6166 enum hba_port_matched_codes {
6168 MATCHED_WITH_ADDR_AND_PHYMASK,
6169 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6170 MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6175 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6176 * from HBA port table
6177 * @ioc: per adapter object
6178 * @port_entry: hba port entry from temporary port table which needs to be
6179 * searched for matched entry in the HBA port table
6180 * @matched_port_entry: save matched hba port entry here
6181 * @count: count of matched entries
6183 * return type of matched entry found.
6185 static enum hba_port_matched_codes
6186 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6187 struct hba_port *port_entry,
6188 struct hba_port **matched_port_entry, int *count)
6190 struct hba_port *port_table_entry, *matched_port = NULL;
6191 enum hba_port_matched_codes matched_code = NOT_MATCHED;
6193 *matched_port_entry = NULL;
6195 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6196 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6199 if ((port_table_entry->sas_address == port_entry->sas_address)
6200 && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6201 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6202 matched_port = port_table_entry;
6206 if ((port_table_entry->sas_address == port_entry->sas_address)
6207 && (port_table_entry->phy_mask & port_entry->phy_mask)
6208 && (port_table_entry->port_id == port_entry->port_id)) {
6209 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6210 matched_port = port_table_entry;
6214 if ((port_table_entry->sas_address == port_entry->sas_address)
6215 && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6217 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6219 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6220 matched_port = port_table_entry;
6224 if (port_table_entry->sas_address == port_entry->sas_address) {
6226 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6228 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6230 matched_code = MATCHED_WITH_ADDR;
6231 matched_port = port_table_entry;
6236 *matched_port_entry = matched_port;
6237 if (matched_code == MATCHED_WITH_ADDR)
6239 return matched_code;
6243 * _scsih_del_phy_part_of_anther_port - remove phy if it
6244 * is a part of anther port
6245 *@ioc: per adapter object
6246 *@port_table: port table after reset
6247 *@index: hba port entry index
6248 *@port_count: number of ports available after host reset
6249 *@offset: HBA phy bit offset
6253 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6254 struct hba_port *port_table,
6255 int index, u8 port_count, int offset)
6257 struct _sas_node *sas_node = &ioc->sas_hba;
6260 for (i = 0; i < port_count; i++) {
6264 if (port_table[i].phy_mask & (1 << offset)) {
6265 mpt3sas_transport_del_phy_from_an_existing_port(
6266 ioc, sas_node, &sas_node->phy[offset]);
6272 port_table[index].phy_mask |= (1 << offset);
6276 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6278 *@ioc: per adapter object
6279 *@hba_port_entry: hba port table entry
6280 *@port_table: temporary port table
6281 *@index: hba port entry index
6282 *@port_count: number of ports available after host reset
6286 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6287 struct hba_port *hba_port_entry, struct hba_port *port_table,
6288 int index, int port_count)
6290 u32 phy_mask, offset = 0;
6291 struct _sas_node *sas_node = &ioc->sas_hba;
6293 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6295 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6296 if (phy_mask & (1 << offset)) {
6297 if (!(port_table[index].phy_mask & (1 << offset))) {
6298 _scsih_del_phy_part_of_anther_port(
6299 ioc, port_table, index, port_count,
6303 if (sas_node->phy[offset].phy_belongs_to_port)
6304 mpt3sas_transport_del_phy_from_an_existing_port(
6305 ioc, sas_node, &sas_node->phy[offset]);
6306 mpt3sas_transport_add_phy_to_an_existing_port(
6307 ioc, sas_node, &sas_node->phy[offset],
6308 hba_port_entry->sas_address,
6315 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6316 * @ioc: per adapter object
6321 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6323 struct hba_port *port, *port_next;
6324 struct virtual_phy *vphy, *vphy_next;
6326 list_for_each_entry_safe(port, port_next,
6327 &ioc->port_table_list, list) {
6328 if (!port->vphys_mask)
6330 list_for_each_entry_safe(vphy, vphy_next,
6331 &port->vphys_list, list) {
6332 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6333 drsprintk(ioc, ioc_info(ioc,
6334 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6335 vphy, port->port_id,
6337 port->vphys_mask &= ~vphy->phy_mask;
6338 list_del(&vphy->list);
6342 if (!port->vphys_mask && !port->sas_address)
6343 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6348 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6350 *@ioc: per adapter object
6354 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6356 struct hba_port *port, *port_next;
6358 list_for_each_entry_safe(port, port_next,
6359 &ioc->port_table_list, list) {
6360 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6361 port->flags & HBA_PORT_FLAG_NEW_PORT)
6364 drsprintk(ioc, ioc_info(ioc,
6365 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6366 port, port->port_id, port->phy_mask));
6367 list_del(&port->list);
6373 * _scsih_sas_port_refresh - Update HBA port table after host reset
6374 * @ioc: per adapter object
6377 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6380 struct hba_port *port_table;
6381 struct hba_port *port_table_entry;
6382 struct hba_port *port_entry = NULL;
6383 int i, j, count = 0, lcount = 0;
6387 drsprintk(ioc, ioc_info(ioc,
6388 "updating ports for sas_host(0x%016llx)\n",
6389 (unsigned long long)ioc->sas_hba.sas_address));
6391 port_table = kcalloc(ioc->sas_hba.num_phys,
6392 sizeof(struct hba_port), GFP_KERNEL);
6396 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6400 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6401 for (j = 0; j < port_count; j++)
6402 drsprintk(ioc, ioc_info(ioc,
6403 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6404 port_table[j].port_id,
6405 port_table[j].phy_mask, port_table[j].sas_address));
6407 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6408 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6410 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6411 port_table_entry = NULL;
6412 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6413 drsprintk(ioc, ioc_info(ioc,
6414 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6415 port_table_entry->port_id,
6416 port_table_entry->phy_mask,
6417 port_table_entry->sas_address));
6420 for (j = 0; j < port_count; j++) {
6421 ret = _scsih_look_and_get_matched_port_entry(ioc,
6422 &port_table[j], &port_entry, &count);
6424 drsprintk(ioc, ioc_info(ioc,
6425 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6426 port_table[j].sas_address,
6427 port_table[j].port_id));
6432 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6433 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6434 _scsih_add_or_del_phys_from_existing_port(ioc,
6435 port_entry, port_table, j, port_count);
6437 case MATCHED_WITH_ADDR:
6438 sas_addr = port_table[j].sas_address;
6439 for (i = 0; i < port_count; i++) {
6440 if (port_table[i].sas_address == sas_addr)
6444 if (count > 1 || lcount > 1)
6447 _scsih_add_or_del_phys_from_existing_port(ioc,
6448 port_entry, port_table, j, port_count);
6454 if (port_entry->port_id != port_table[j].port_id)
6455 port_entry->port_id = port_table[j].port_id;
6456 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6457 port_entry->phy_mask = port_table[j].phy_mask;
6460 port_table_entry = NULL;
6464 * _scsih_alloc_vphy - allocate virtual_phy object
6465 * @ioc: per adapter object
6466 * @port_id: Port ID number
6467 * @phy_num: HBA Phy number
6469 * Returns allocated virtual_phy object.
6471 static struct virtual_phy *
6472 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6474 struct virtual_phy *vphy;
6475 struct hba_port *port;
6477 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6481 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6483 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6487 if (!port->vphys_mask)
6488 INIT_LIST_HEAD(&port->vphys_list);
6491 * Enable bit corresponding to HBA phy number on its
6492 * parent hba_port object's vphys_mask field.
6494 port->vphys_mask |= (1 << phy_num);
6495 vphy->phy_mask |= (1 << phy_num);
6497 list_add_tail(&vphy->list, &port->vphys_list);
6500 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6501 vphy, port->port_id, phy_num);
6507 * _scsih_sas_host_refresh - refreshing sas host object contents
6508 * @ioc: per adapter object
6511 * During port enable, fw will send topology events for every device. Its
6512 * possible that the handles may change from the previous setting, so this
6513 * code keeping handles updating if changed.
6516 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6521 Mpi2ConfigReply_t mpi_reply;
6522 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6523 u16 attached_handle;
6524 u8 link_rate, port_id;
6525 struct hba_port *port;
6526 Mpi2SasPhyPage0_t phy_pg0;
6529 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6530 (u64)ioc->sas_hba.sas_address));
6532 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6533 * sizeof(Mpi2SasIOUnit0PhyData_t));
6534 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6535 if (!sas_iounit_pg0) {
6536 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6537 __FILE__, __LINE__, __func__);
6541 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6542 sas_iounit_pg0, sz)) != 0)
6544 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6545 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6547 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6548 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
6550 ioc->sas_hba.handle = le16_to_cpu(
6551 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6552 port_id = sas_iounit_pg0->PhyData[i].Port;
6553 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6554 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6558 port->port_id = port_id;
6560 "hba_port entry: %p, port: %d is added to hba_port list\n",
6561 port, port->port_id);
6562 if (ioc->shost_recovery)
6563 port->flags = HBA_PORT_FLAG_NEW_PORT;
6564 list_add_tail(&port->list, &ioc->port_table_list);
6567 * Check whether current Phy belongs to HBA vSES device or not.
6569 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6570 MPI2_SAS_DEVICE_INFO_SEP &&
6571 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
6572 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6575 "failure at %s:%d/%s()!\n",
6576 __FILE__, __LINE__, __func__);
6579 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6580 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6583 * Allocate a virtual_phy object for vSES device, if
6584 * this vSES device is hot added.
6586 if (!_scsih_alloc_vphy(ioc, port_id, i))
6588 ioc->sas_hba.phy[i].hba_vphy = 1;
6591 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6592 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
6594 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6595 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6596 ioc->sas_hba.phy[i].port =
6597 mpt3sas_get_port_by_id(ioc, port_id, 0);
6598 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6599 attached_handle, i, link_rate,
6600 ioc->sas_hba.phy[i].port);
6603 kfree(sas_iounit_pg0);
6607 * _scsih_sas_host_add - create sas host object
6608 * @ioc: per adapter object
6610 * Creating host side data object, stored in ioc->sas_hba
6613 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6616 Mpi2ConfigReply_t mpi_reply;
6617 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6618 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6619 Mpi2SasPhyPage0_t phy_pg0;
6620 Mpi2SasDevicePage0_t sas_device_pg0;
6621 Mpi2SasEnclosurePage0_t enclosure_pg0;
6624 u8 device_missing_delay;
6625 u8 num_phys, port_id;
6626 struct hba_port *port;
6628 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6630 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6631 __FILE__, __LINE__, __func__);
6634 ioc->sas_hba.phy = kcalloc(num_phys,
6635 sizeof(struct _sas_phy), GFP_KERNEL);
6636 if (!ioc->sas_hba.phy) {
6637 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6638 __FILE__, __LINE__, __func__);
6641 ioc->sas_hba.num_phys = num_phys;
6643 /* sas_iounit page 0 */
6644 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
6645 sizeof(Mpi2SasIOUnit0PhyData_t));
6646 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6647 if (!sas_iounit_pg0) {
6648 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6649 __FILE__, __LINE__, __func__);
6652 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6653 sas_iounit_pg0, sz))) {
6654 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6655 __FILE__, __LINE__, __func__);
6658 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6659 MPI2_IOCSTATUS_MASK;
6660 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6661 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6662 __FILE__, __LINE__, __func__);
6666 /* sas_iounit page 1 */
6667 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
6668 sizeof(Mpi2SasIOUnit1PhyData_t));
6669 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6670 if (!sas_iounit_pg1) {
6671 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6672 __FILE__, __LINE__, __func__);
6675 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6676 sas_iounit_pg1, sz))) {
6677 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6678 __FILE__, __LINE__, __func__);
6681 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6682 MPI2_IOCSTATUS_MASK;
6683 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6684 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6685 __FILE__, __LINE__, __func__);
6689 ioc->io_missing_delay =
6690 sas_iounit_pg1->IODeviceMissingDelay;
6691 device_missing_delay =
6692 sas_iounit_pg1->ReportDeviceMissingDelay;
6693 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6694 ioc->device_missing_delay = (device_missing_delay &
6695 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6697 ioc->device_missing_delay = device_missing_delay &
6698 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6700 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
6701 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6702 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6704 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6705 __FILE__, __LINE__, __func__);
6708 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6709 MPI2_IOCSTATUS_MASK;
6710 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6711 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6712 __FILE__, __LINE__, __func__);
6717 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6718 PhyData[0].ControllerDevHandle);
6720 port_id = sas_iounit_pg0->PhyData[i].Port;
6721 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6722 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6726 port->port_id = port_id;
6728 "hba_port entry: %p, port: %d is added to hba_port list\n",
6729 port, port->port_id);
6730 list_add_tail(&port->list,
6731 &ioc->port_table_list);
6735 * Check whether current Phy belongs to HBA vSES device or not.
6737 if ((le32_to_cpu(phy_pg0.PhyInfo) &
6738 MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6739 (phy_pg0.NegotiatedLinkRate >> 4) >=
6740 MPI2_SAS_NEG_LINK_RATE_1_5) {
6742 * Allocate a virtual_phy object for vSES device.
6744 if (!_scsih_alloc_vphy(ioc, port_id, i))
6746 ioc->sas_hba.phy[i].hba_vphy = 1;
6749 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6750 ioc->sas_hba.phy[i].phy_id = i;
6751 ioc->sas_hba.phy[i].port =
6752 mpt3sas_get_port_by_id(ioc, port_id, 0);
6753 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6754 phy_pg0, ioc->sas_hba.parent_dev);
6756 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6757 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6758 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6759 __FILE__, __LINE__, __func__);
6762 ioc->sas_hba.enclosure_handle =
6763 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6764 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6765 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6766 ioc->sas_hba.handle,
6767 (u64)ioc->sas_hba.sas_address,
6768 ioc->sas_hba.num_phys);
6770 if (ioc->sas_hba.enclosure_handle) {
6771 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6772 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6773 ioc->sas_hba.enclosure_handle)))
6774 ioc->sas_hba.enclosure_logical_id =
6775 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6779 kfree(sas_iounit_pg1);
6780 kfree(sas_iounit_pg0);
6784 * _scsih_expander_add - creating expander object
6785 * @ioc: per adapter object
6786 * @handle: expander handle
6788 * Creating expander object, stored in ioc->sas_expander_list.
6790 * Return: 0 for success, else error.
6793 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6795 struct _sas_node *sas_expander;
6796 struct _enclosure_node *enclosure_dev;
6797 Mpi2ConfigReply_t mpi_reply;
6798 Mpi2ExpanderPage0_t expander_pg0;
6799 Mpi2ExpanderPage1_t expander_pg1;
6802 u64 sas_address, sas_address_parent = 0;
6804 unsigned long flags;
6805 struct _sas_port *mpt3sas_port = NULL;
6813 if (ioc->shost_recovery || ioc->pci_error_recovery)
6816 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6817 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6818 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6819 __FILE__, __LINE__, __func__);
6823 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6824 MPI2_IOCSTATUS_MASK;
6825 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6826 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6827 __FILE__, __LINE__, __func__);
6831 /* handle out of order topology events */
6832 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6833 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6835 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6836 __FILE__, __LINE__, __func__);
6840 port_id = expander_pg0.PhysicalPort;
6841 if (sas_address_parent != ioc->sas_hba.sas_address) {
6842 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6843 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6845 mpt3sas_get_port_by_id(ioc, port_id, 0));
6846 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6847 if (!sas_expander) {
6848 rc = _scsih_expander_add(ioc, parent_handle);
6854 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6855 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6856 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6857 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6858 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6863 sas_expander = kzalloc(sizeof(struct _sas_node),
6865 if (!sas_expander) {
6866 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6867 __FILE__, __LINE__, __func__);
6871 sas_expander->handle = handle;
6872 sas_expander->num_phys = expander_pg0.NumPhys;
6873 sas_expander->sas_address_parent = sas_address_parent;
6874 sas_expander->sas_address = sas_address;
6875 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6876 if (!sas_expander->port) {
6877 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6878 __FILE__, __LINE__, __func__);
6883 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6884 handle, parent_handle,
6885 (u64)sas_expander->sas_address, sas_expander->num_phys);
6887 if (!sas_expander->num_phys)
6889 sas_expander->phy = kcalloc(sas_expander->num_phys,
6890 sizeof(struct _sas_phy), GFP_KERNEL);
6891 if (!sas_expander->phy) {
6892 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6893 __FILE__, __LINE__, __func__);
6898 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6899 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6900 sas_address_parent, sas_expander->port);
6901 if (!mpt3sas_port) {
6902 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6903 __FILE__, __LINE__, __func__);
6907 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6908 sas_expander->rphy = mpt3sas_port->rphy;
6910 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6911 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6912 &expander_pg1, i, handle))) {
6913 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6914 __FILE__, __LINE__, __func__);
6918 sas_expander->phy[i].handle = handle;
6919 sas_expander->phy[i].phy_id = i;
6920 sas_expander->phy[i].port =
6921 mpt3sas_get_port_by_id(ioc, port_id, 0);
6923 if ((mpt3sas_transport_add_expander_phy(ioc,
6924 &sas_expander->phy[i], expander_pg1,
6925 sas_expander->parent_dev))) {
6926 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6927 __FILE__, __LINE__, __func__);
6933 if (sas_expander->enclosure_handle) {
6935 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6936 sas_expander->enclosure_handle);
6938 sas_expander->enclosure_logical_id =
6939 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6942 _scsih_expander_node_add(ioc, sas_expander);
6948 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6949 sas_address_parent, sas_expander->port);
6950 kfree(sas_expander);
6955 * mpt3sas_expander_remove - removing expander object
6956 * @ioc: per adapter object
6957 * @sas_address: expander sas_address
6958 * @port: hba port entry
6961 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6962 struct hba_port *port)
6964 struct _sas_node *sas_expander;
6965 unsigned long flags;
6967 if (ioc->shost_recovery)
6973 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6974 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6976 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6978 _scsih_expander_node_remove(ioc, sas_expander);
6982 * _scsih_done - internal SCSI_IO callback handler.
6983 * @ioc: per adapter object
6984 * @smid: system request message index
6985 * @msix_index: MSIX table index supplied by the OS
6986 * @reply: reply message frame(lower 32bit addr)
6988 * Callback handler when sending internal generated SCSI_IO.
6989 * The callback index passed is `ioc->scsih_cb_idx`
6991 * Return: 1 meaning mf should be freed from _base_interrupt
6992 * 0 means the mf is freed from this function.
6995 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6997 MPI2DefaultReply_t *mpi_reply;
6999 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7000 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7002 if (ioc->scsih_cmds.smid != smid)
7004 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7006 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7007 mpi_reply->MsgLength*4);
7008 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7010 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7011 complete(&ioc->scsih_cmds.done);
7018 #define MPT3_MAX_LUNS (255)
7022 * _scsih_check_access_status - check access flags
7023 * @ioc: per adapter object
7024 * @sas_address: sas address
7025 * @handle: sas device handle
7026 * @access_status: errors returned during discovery of the device
7028 * Return: 0 for success, else failure
7031 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7032 u16 handle, u8 access_status)
7037 switch (access_status) {
7038 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7039 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7042 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7043 desc = "sata capability failed";
7045 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7046 desc = "sata affiliation conflict";
7048 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7049 desc = "route not addressable";
7051 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7052 desc = "smp error not addressable";
7054 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7055 desc = "device blocked";
7057 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7058 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7059 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7060 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7061 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7062 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7063 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7064 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7065 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7066 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7067 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7068 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7069 desc = "sata initialization failed";
7079 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7080 desc, (u64)sas_address, handle);
7085 * _scsih_check_device - checking device responsiveness
7086 * @ioc: per adapter object
7087 * @parent_sas_address: sas address of parent expander or sas host
7088 * @handle: attached device handle
7089 * @phy_number: phy number
7090 * @link_rate: new link rate
7093 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7094 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7096 Mpi2ConfigReply_t mpi_reply;
7097 Mpi2SasDevicePage0_t sas_device_pg0;
7098 struct _sas_device *sas_device = NULL;
7099 struct _enclosure_node *enclosure_dev = NULL;
7101 unsigned long flags;
7103 struct scsi_target *starget;
7104 struct MPT3SAS_TARGET *sas_target_priv_data;
7106 struct hba_port *port;
7108 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7109 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7112 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7113 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7116 /* wide port handling ~ we need only handle device once for the phy that
7117 * is matched in sas device page zero
7119 if (phy_number != sas_device_pg0.PhyNum)
7122 /* check if this is end device */
7123 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7124 if (!(_scsih_is_end_device(device_info)))
7127 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7128 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7129 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7132 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7138 if (unlikely(sas_device->handle != handle)) {
7139 starget = sas_device->starget;
7140 sas_target_priv_data = starget->hostdata;
7141 starget_printk(KERN_INFO, starget,
7142 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7143 sas_device->handle, handle);
7144 sas_target_priv_data->handle = handle;
7145 sas_device->handle = handle;
7146 if (le16_to_cpu(sas_device_pg0.Flags) &
7147 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7148 sas_device->enclosure_level =
7149 sas_device_pg0.EnclosureLevel;
7150 memcpy(sas_device->connector_name,
7151 sas_device_pg0.ConnectorName, 4);
7152 sas_device->connector_name[4] = '\0';
7154 sas_device->enclosure_level = 0;
7155 sas_device->connector_name[0] = '\0';
7158 sas_device->enclosure_handle =
7159 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7160 sas_device->is_chassis_slot_valid = 0;
7161 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7162 sas_device->enclosure_handle);
7163 if (enclosure_dev) {
7164 sas_device->enclosure_logical_id =
7165 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7166 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7167 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7168 sas_device->is_chassis_slot_valid = 1;
7169 sas_device->chassis_slot =
7170 enclosure_dev->pg0.ChassisSlot;
7175 /* check if device is present */
7176 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7177 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7178 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7183 /* check if there were any issues with discovery */
7184 if (_scsih_check_access_status(ioc, sas_address, handle,
7185 sas_device_pg0.AccessStatus))
7188 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7189 _scsih_ublock_io_device(ioc, sas_address, port);
7192 sas_device_put(sas_device);
7196 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7198 sas_device_put(sas_device);
7202 * _scsih_add_device - creating sas device object
7203 * @ioc: per adapter object
7204 * @handle: sas device handle
7205 * @phy_num: phy number end device attached to
7206 * @is_pd: is this hidden raid component
7208 * Creating end device object, stored in ioc->sas_device_list.
7210 * Return: 0 for success, non-zero for failure.
7213 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7216 Mpi2ConfigReply_t mpi_reply;
7217 Mpi2SasDevicePage0_t sas_device_pg0;
7218 struct _sas_device *sas_device;
7219 struct _enclosure_node *enclosure_dev = NULL;
7225 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7226 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7227 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7228 __FILE__, __LINE__, __func__);
7232 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7233 MPI2_IOCSTATUS_MASK;
7234 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7235 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7236 __FILE__, __LINE__, __func__);
7240 /* check if this is end device */
7241 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7242 if (!(_scsih_is_end_device(device_info)))
7244 set_bit(handle, ioc->pend_os_device_add);
7245 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7247 /* check if device is present */
7248 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7249 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7250 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7255 /* check if there were any issues with discovery */
7256 if (_scsih_check_access_status(ioc, sas_address, handle,
7257 sas_device_pg0.AccessStatus))
7260 port_id = sas_device_pg0.PhysicalPort;
7261 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7262 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7264 clear_bit(handle, ioc->pend_os_device_add);
7265 sas_device_put(sas_device);
7269 if (sas_device_pg0.EnclosureHandle) {
7271 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7272 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7273 if (enclosure_dev == NULL)
7274 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7275 sas_device_pg0.EnclosureHandle);
7278 sas_device = kzalloc(sizeof(struct _sas_device),
7281 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7282 __FILE__, __LINE__, __func__);
7286 kref_init(&sas_device->refcount);
7287 sas_device->handle = handle;
7288 if (_scsih_get_sas_address(ioc,
7289 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7290 &sas_device->sas_address_parent) != 0)
7291 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7292 __FILE__, __LINE__, __func__);
7293 sas_device->enclosure_handle =
7294 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7295 if (sas_device->enclosure_handle != 0)
7297 le16_to_cpu(sas_device_pg0.Slot);
7298 sas_device->device_info = device_info;
7299 sas_device->sas_address = sas_address;
7300 sas_device->phy = sas_device_pg0.PhyNum;
7301 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7302 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7303 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7304 if (!sas_device->port) {
7305 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7306 __FILE__, __LINE__, __func__);
7310 if (le16_to_cpu(sas_device_pg0.Flags)
7311 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7312 sas_device->enclosure_level =
7313 sas_device_pg0.EnclosureLevel;
7314 memcpy(sas_device->connector_name,
7315 sas_device_pg0.ConnectorName, 4);
7316 sas_device->connector_name[4] = '\0';
7318 sas_device->enclosure_level = 0;
7319 sas_device->connector_name[0] = '\0';
7321 /* get enclosure_logical_id & chassis_slot*/
7322 sas_device->is_chassis_slot_valid = 0;
7323 if (enclosure_dev) {
7324 sas_device->enclosure_logical_id =
7325 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7326 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7327 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7328 sas_device->is_chassis_slot_valid = 1;
7329 sas_device->chassis_slot =
7330 enclosure_dev->pg0.ChassisSlot;
7334 /* get device name */
7335 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7337 if (ioc->wait_for_discovery_to_complete)
7338 _scsih_sas_device_init_add(ioc, sas_device);
7340 _scsih_sas_device_add(ioc, sas_device);
7343 sas_device_put(sas_device);
7348 * _scsih_remove_device - removing sas device object
7349 * @ioc: per adapter object
7350 * @sas_device: the sas_device object
7353 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7354 struct _sas_device *sas_device)
7356 struct MPT3SAS_TARGET *sas_target_priv_data;
7358 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7359 (sas_device->pfa_led_on)) {
7360 _scsih_turn_off_pfa_led(ioc, sas_device);
7361 sas_device->pfa_led_on = 0;
7365 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7367 sas_device->handle, (u64)sas_device->sas_address));
7369 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7372 if (sas_device->starget && sas_device->starget->hostdata) {
7373 sas_target_priv_data = sas_device->starget->hostdata;
7374 sas_target_priv_data->deleted = 1;
7375 _scsih_ublock_io_device(ioc, sas_device->sas_address,
7377 sas_target_priv_data->handle =
7378 MPT3SAS_INVALID_DEVICE_HANDLE;
7381 if (!ioc->hide_drives)
7382 mpt3sas_transport_port_remove(ioc,
7383 sas_device->sas_address,
7384 sas_device->sas_address_parent,
7387 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7388 sas_device->handle, (u64)sas_device->sas_address);
7390 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7393 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7395 sas_device->handle, (u64)sas_device->sas_address));
7396 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7401 * _scsih_sas_topology_change_event_debug - debug for topology event
7402 * @ioc: per adapter object
7403 * @event_data: event data payload
7407 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7408 Mpi2EventDataSasTopologyChangeList_t *event_data)
7414 char *status_str = NULL;
7415 u8 link_rate, prev_link_rate;
7417 switch (event_data->ExpStatus) {
7418 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7421 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7422 status_str = "remove";
7424 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7426 status_str = "responding";
7428 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7429 status_str = "remove delay";
7432 status_str = "unknown status";
7435 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7436 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7437 "start_phy(%02d), count(%d)\n",
7438 le16_to_cpu(event_data->ExpanderDevHandle),
7439 le16_to_cpu(event_data->EnclosureHandle),
7440 event_data->StartPhyNum, event_data->NumEntries);
7441 for (i = 0; i < event_data->NumEntries; i++) {
7442 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7445 phy_number = event_data->StartPhyNum + i;
7446 reason_code = event_data->PHY[i].PhyStatus &
7447 MPI2_EVENT_SAS_TOPO_RC_MASK;
7448 switch (reason_code) {
7449 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7450 status_str = "target add";
7452 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7453 status_str = "target remove";
7455 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7456 status_str = "delay target remove";
7458 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7459 status_str = "link rate change";
7461 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7462 status_str = "target responding";
7465 status_str = "unknown";
7468 link_rate = event_data->PHY[i].LinkRate >> 4;
7469 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7470 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7471 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7472 handle, status_str, link_rate, prev_link_rate);
7478 * _scsih_sas_topology_change_event - handle topology changes
7479 * @ioc: per adapter object
7480 * @fw_event: The fw_event_work object
7485 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7486 struct fw_event_work *fw_event)
7489 u16 parent_handle, handle;
7491 u8 phy_number, max_phys;
7492 struct _sas_node *sas_expander;
7494 unsigned long flags;
7495 u8 link_rate, prev_link_rate;
7496 struct hba_port *port;
7497 Mpi2EventDataSasTopologyChangeList_t *event_data =
7498 (Mpi2EventDataSasTopologyChangeList_t *)
7499 fw_event->event_data;
7501 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7502 _scsih_sas_topology_change_event_debug(ioc, event_data);
7504 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
7507 if (!ioc->sas_hba.num_phys)
7508 _scsih_sas_host_add(ioc);
7510 _scsih_sas_host_refresh(ioc);
7512 if (fw_event->ignore) {
7513 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7517 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7518 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7520 /* handle expander add */
7521 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7522 if (_scsih_expander_add(ioc, parent_handle) != 0)
7525 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7526 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7529 sas_address = sas_expander->sas_address;
7530 max_phys = sas_expander->num_phys;
7531 port = sas_expander->port;
7532 } else if (parent_handle < ioc->sas_hba.num_phys) {
7533 sas_address = ioc->sas_hba.sas_address;
7534 max_phys = ioc->sas_hba.num_phys;
7536 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7539 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7541 /* handle siblings events */
7542 for (i = 0; i < event_data->NumEntries; i++) {
7543 if (fw_event->ignore) {
7545 ioc_info(ioc, "ignoring expander event\n"));
7548 if (ioc->remove_host || ioc->pci_error_recovery)
7550 phy_number = event_data->StartPhyNum + i;
7551 if (phy_number >= max_phys)
7553 reason_code = event_data->PHY[i].PhyStatus &
7554 MPI2_EVENT_SAS_TOPO_RC_MASK;
7555 if ((event_data->PHY[i].PhyStatus &
7556 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7557 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7559 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7562 link_rate = event_data->PHY[i].LinkRate >> 4;
7563 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7564 switch (reason_code) {
7565 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7567 if (ioc->shost_recovery)
7570 if (link_rate == prev_link_rate)
7573 mpt3sas_transport_update_links(ioc, sas_address,
7574 handle, phy_number, link_rate, port);
7576 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7579 _scsih_check_device(ioc, sas_address, handle,
7580 phy_number, link_rate);
7582 if (!test_bit(handle, ioc->pend_os_device_add))
7587 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7589 if (ioc->shost_recovery)
7592 mpt3sas_transport_update_links(ioc, sas_address,
7593 handle, phy_number, link_rate, port);
7595 _scsih_add_device(ioc, handle, phy_number, 0);
7598 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7600 _scsih_device_remove_by_handle(ioc, handle);
7605 /* handle expander removal */
7606 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7608 mpt3sas_expander_remove(ioc, sas_address, port);
7614 * _scsih_sas_device_status_change_event_debug - debug for device event
7616 * @event_data: event data payload
7620 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7621 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7623 char *reason_str = NULL;
7625 switch (event_data->ReasonCode) {
7626 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7627 reason_str = "smart data";
7629 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7630 reason_str = "unsupported device discovered";
7632 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7633 reason_str = "internal device reset";
7635 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7636 reason_str = "internal task abort";
7638 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7639 reason_str = "internal task abort set";
7641 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7642 reason_str = "internal clear task set";
7644 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7645 reason_str = "internal query task";
7647 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7648 reason_str = "sata init failure";
7650 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7651 reason_str = "internal device reset complete";
7653 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7654 reason_str = "internal task abort complete";
7656 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7657 reason_str = "internal async notification";
7659 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7660 reason_str = "expander reduced functionality";
7662 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7663 reason_str = "expander reduced functionality complete";
7666 reason_str = "unknown reason";
7669 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7670 reason_str, le16_to_cpu(event_data->DevHandle),
7671 (u64)le64_to_cpu(event_data->SASAddress),
7672 le16_to_cpu(event_data->TaskTag));
7673 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7674 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7675 event_data->ASC, event_data->ASCQ);
7680 * _scsih_sas_device_status_change_event - handle device status change
7681 * @ioc: per adapter object
7682 * @event_data: The fw event
7686 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7687 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7689 struct MPT3SAS_TARGET *target_priv_data;
7690 struct _sas_device *sas_device;
7692 unsigned long flags;
7694 /* In MPI Revision K (0xC), the internal device reset complete was
7695 * implemented, so avoid setting tm_busy flag for older firmware.
7697 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7700 if (event_data->ReasonCode !=
7701 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7702 event_data->ReasonCode !=
7703 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7706 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7707 sas_address = le64_to_cpu(event_data->SASAddress);
7708 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7710 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7712 if (!sas_device || !sas_device->starget)
7715 target_priv_data = sas_device->starget->hostdata;
7716 if (!target_priv_data)
7719 if (event_data->ReasonCode ==
7720 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7721 target_priv_data->tm_busy = 1;
7723 target_priv_data->tm_busy = 0;
7725 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7727 "%s tm_busy flag for handle(0x%04x)\n",
7728 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7729 target_priv_data->handle);
7733 sas_device_put(sas_device);
7735 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7740 * _scsih_check_pcie_access_status - check access flags
7741 * @ioc: per adapter object
7743 * @handle: sas device handle
7744 * @access_status: errors returned during discovery of the device
7746 * Return: 0 for success, else failure
7749 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7750 u16 handle, u8 access_status)
7755 switch (access_status) {
7756 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7757 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7760 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7761 desc = "PCIe device capability failed";
7763 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7764 desc = "PCIe device blocked";
7766 "Device with Access Status (%s): wwid(0x%016llx), "
7767 "handle(0x%04x)\n ll only be added to the internal list",
7768 desc, (u64)wwid, handle);
7771 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7772 desc = "PCIe device mem space access failed";
7774 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7775 desc = "PCIe device unsupported";
7777 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7778 desc = "PCIe device MSIx Required";
7780 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7781 desc = "PCIe device init fail max";
7783 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7784 desc = "PCIe device status unknown";
7786 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7787 desc = "nvme ready timeout";
7789 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7790 desc = "nvme device configuration unsupported";
7792 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7793 desc = "nvme identify failed";
7795 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7796 desc = "nvme qconfig failed";
7798 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7799 desc = "nvme qcreation failed";
7801 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7802 desc = "nvme eventcfg failed";
7804 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7805 desc = "nvme get feature stat failed";
7807 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7808 desc = "nvme idle timeout";
7810 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7811 desc = "nvme failure status";
7814 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7815 access_status, (u64)wwid, handle);
7822 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7823 desc, (u64)wwid, handle);
7828 * _scsih_pcie_device_remove_from_sml - removing pcie device
7829 * from SML and free up associated memory
7830 * @ioc: per adapter object
7831 * @pcie_device: the pcie_device object
7834 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7835 struct _pcie_device *pcie_device)
7837 struct MPT3SAS_TARGET *sas_target_priv_data;
7840 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7842 pcie_device->handle, (u64)pcie_device->wwid));
7843 if (pcie_device->enclosure_handle != 0)
7845 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7847 (u64)pcie_device->enclosure_logical_id,
7848 pcie_device->slot));
7849 if (pcie_device->connector_name[0] != '\0')
7851 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7853 pcie_device->enclosure_level,
7854 pcie_device->connector_name));
7856 if (pcie_device->starget && pcie_device->starget->hostdata) {
7857 sas_target_priv_data = pcie_device->starget->hostdata;
7858 sas_target_priv_data->deleted = 1;
7859 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7860 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7863 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7864 pcie_device->handle, (u64)pcie_device->wwid);
7865 if (pcie_device->enclosure_handle != 0)
7866 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7867 (u64)pcie_device->enclosure_logical_id,
7869 if (pcie_device->connector_name[0] != '\0')
7870 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7871 pcie_device->enclosure_level,
7872 pcie_device->connector_name);
7874 if (pcie_device->starget && (pcie_device->access_status !=
7875 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7876 scsi_remove_target(&pcie_device->starget->dev);
7878 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7880 pcie_device->handle, (u64)pcie_device->wwid));
7881 if (pcie_device->enclosure_handle != 0)
7883 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7885 (u64)pcie_device->enclosure_logical_id,
7886 pcie_device->slot));
7887 if (pcie_device->connector_name[0] != '\0')
7889 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7891 pcie_device->enclosure_level,
7892 pcie_device->connector_name));
7894 kfree(pcie_device->serial_number);
7899 * _scsih_pcie_check_device - checking device responsiveness
7900 * @ioc: per adapter object
7901 * @handle: attached device handle
7904 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7906 Mpi2ConfigReply_t mpi_reply;
7907 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7909 struct _pcie_device *pcie_device;
7911 unsigned long flags;
7912 struct scsi_target *starget;
7913 struct MPT3SAS_TARGET *sas_target_priv_data;
7916 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7917 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
7920 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7921 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7924 /* check if this is end device */
7925 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7926 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
7929 wwid = le64_to_cpu(pcie_device_pg0.WWID);
7930 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7931 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7934 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7938 if (unlikely(pcie_device->handle != handle)) {
7939 starget = pcie_device->starget;
7940 sas_target_priv_data = starget->hostdata;
7941 pcie_device->access_status = pcie_device_pg0.AccessStatus;
7942 starget_printk(KERN_INFO, starget,
7943 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7944 pcie_device->handle, handle);
7945 sas_target_priv_data->handle = handle;
7946 pcie_device->handle = handle;
7948 if (le32_to_cpu(pcie_device_pg0.Flags) &
7949 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7950 pcie_device->enclosure_level =
7951 pcie_device_pg0.EnclosureLevel;
7952 memcpy(&pcie_device->connector_name[0],
7953 &pcie_device_pg0.ConnectorName[0], 4);
7955 pcie_device->enclosure_level = 0;
7956 pcie_device->connector_name[0] = '\0';
7960 /* check if device is present */
7961 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7962 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7963 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
7965 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7966 pcie_device_put(pcie_device);
7970 /* check if there were any issues with discovery */
7971 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7972 pcie_device_pg0.AccessStatus)) {
7973 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7974 pcie_device_put(pcie_device);
7978 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7979 pcie_device_put(pcie_device);
7981 _scsih_ublock_io_device(ioc, wwid, NULL);
7987 * _scsih_pcie_add_device - creating pcie device object
7988 * @ioc: per adapter object
7989 * @handle: pcie device handle
7991 * Creating end device object, stored in ioc->pcie_device_list.
7993 * Return: 1 means queue the event later, 0 means complete the event
7996 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7998 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7999 Mpi26PCIeDevicePage2_t pcie_device_pg2;
8000 Mpi2ConfigReply_t mpi_reply;
8001 struct _pcie_device *pcie_device;
8002 struct _enclosure_node *enclosure_dev;
8006 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8007 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8008 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8009 __FILE__, __LINE__, __func__);
8012 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8013 MPI2_IOCSTATUS_MASK;
8014 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8015 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8016 __FILE__, __LINE__, __func__);
8020 set_bit(handle, ioc->pend_os_device_add);
8021 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8023 /* check if device is present */
8024 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8025 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8026 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8031 /* check if there were any issues with discovery */
8032 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8033 pcie_device_pg0.AccessStatus))
8036 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8037 (pcie_device_pg0.DeviceInfo))))
8040 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8042 clear_bit(handle, ioc->pend_os_device_add);
8043 pcie_device_put(pcie_device);
8047 /* PCIe Device Page 2 contains read-only information about a
8048 * specific NVMe device; therefore, this page is only
8049 * valid for NVMe devices and skip for pcie devices of type scsi.
8051 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8052 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8053 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8054 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8057 "failure at %s:%d/%s()!\n", __FILE__,
8058 __LINE__, __func__);
8062 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8063 MPI2_IOCSTATUS_MASK;
8064 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8066 "failure at %s:%d/%s()!\n", __FILE__,
8067 __LINE__, __func__);
8072 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8074 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8075 __FILE__, __LINE__, __func__);
8079 kref_init(&pcie_device->refcount);
8080 pcie_device->id = ioc->pcie_target_id++;
8081 pcie_device->channel = PCIE_CHANNEL;
8082 pcie_device->handle = handle;
8083 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8084 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8085 pcie_device->wwid = wwid;
8086 pcie_device->port_num = pcie_device_pg0.PortNum;
8087 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8088 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8090 pcie_device->enclosure_handle =
8091 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8092 if (pcie_device->enclosure_handle != 0)
8093 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8095 if (le32_to_cpu(pcie_device_pg0.Flags) &
8096 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8097 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8098 memcpy(&pcie_device->connector_name[0],
8099 &pcie_device_pg0.ConnectorName[0], 4);
8101 pcie_device->enclosure_level = 0;
8102 pcie_device->connector_name[0] = '\0';
8105 /* get enclosure_logical_id */
8106 if (pcie_device->enclosure_handle) {
8108 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8109 pcie_device->enclosure_handle);
8111 pcie_device->enclosure_logical_id =
8112 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8114 /* TODO -- Add device name once FW supports it */
8115 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8116 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8117 pcie_device->nvme_mdts =
8118 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8119 pcie_device->shutdown_latency =
8120 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8122 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8123 * if drive's RTD3 Entry Latency is greater then IOC's
8124 * max_shutdown_latency.
8126 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8127 ioc->max_shutdown_latency =
8128 pcie_device->shutdown_latency;
8129 if (pcie_device_pg2.ControllerResetTO)
8130 pcie_device->reset_timeout =
8131 pcie_device_pg2.ControllerResetTO;
8133 pcie_device->reset_timeout = 30;
8135 pcie_device->reset_timeout = 30;
8137 if (ioc->wait_for_discovery_to_complete)
8138 _scsih_pcie_device_init_add(ioc, pcie_device);
8140 _scsih_pcie_device_add(ioc, pcie_device);
8142 pcie_device_put(pcie_device);
8147 * _scsih_pcie_topology_change_event_debug - debug for topology
8149 * @ioc: per adapter object
8150 * @event_data: event data payload
8154 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8155 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8161 char *status_str = NULL;
8162 u8 link_rate, prev_link_rate;
8164 switch (event_data->SwitchStatus) {
8165 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8168 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8169 status_str = "remove";
8171 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8173 status_str = "responding";
8175 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8176 status_str = "remove delay";
8179 status_str = "unknown status";
8182 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8183 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8184 "start_port(%02d), count(%d)\n",
8185 le16_to_cpu(event_data->SwitchDevHandle),
8186 le16_to_cpu(event_data->EnclosureHandle),
8187 event_data->StartPortNum, event_data->NumEntries);
8188 for (i = 0; i < event_data->NumEntries; i++) {
8190 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8193 port_number = event_data->StartPortNum + i;
8194 reason_code = event_data->PortEntry[i].PortStatus;
8195 switch (reason_code) {
8196 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8197 status_str = "target add";
8199 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8200 status_str = "target remove";
8202 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8203 status_str = "delay target remove";
8205 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8206 status_str = "link rate change";
8208 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8209 status_str = "target responding";
8212 status_str = "unknown";
8215 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8216 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8217 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8218 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8219 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8220 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8221 handle, status_str, link_rate, prev_link_rate);
8226 * _scsih_pcie_topology_change_event - handle PCIe topology
8228 * @ioc: per adapter object
8229 * @fw_event: The fw_event_work object
8234 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8235 struct fw_event_work *fw_event)
8240 u8 link_rate, prev_link_rate;
8241 unsigned long flags;
8243 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8244 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8245 struct _pcie_device *pcie_device;
8247 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8248 _scsih_pcie_topology_change_event_debug(ioc, event_data);
8250 if (ioc->shost_recovery || ioc->remove_host ||
8251 ioc->pci_error_recovery)
8254 if (fw_event->ignore) {
8255 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8259 /* handle siblings events */
8260 for (i = 0; i < event_data->NumEntries; i++) {
8261 if (fw_event->ignore) {
8263 ioc_info(ioc, "ignoring switch event\n"));
8266 if (ioc->remove_host || ioc->pci_error_recovery)
8268 reason_code = event_data->PortEntry[i].PortStatus;
8270 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8274 link_rate = event_data->PortEntry[i].CurrentPortInfo
8275 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8276 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8277 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8279 switch (reason_code) {
8280 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8281 if (ioc->shost_recovery)
8283 if (link_rate == prev_link_rate)
8285 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8288 _scsih_pcie_check_device(ioc, handle);
8290 /* This code after this point handles the test case
8291 * where a device has been added, however its returning
8292 * BUSY for sometime. Then before the Device Missing
8293 * Delay expires and the device becomes READY, the
8294 * device is removed and added back.
8296 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8297 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8298 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8301 pcie_device_put(pcie_device);
8305 if (!test_bit(handle, ioc->pend_os_device_add))
8309 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8311 event_data->PortEntry[i].PortStatus &= 0xF0;
8312 event_data->PortEntry[i].PortStatus |=
8313 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8315 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8316 if (ioc->shost_recovery)
8318 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8321 rc = _scsih_pcie_add_device(ioc, handle);
8323 /* mark entry vacant */
8324 /* TODO This needs to be reviewed and fixed,
8325 * we dont have an entry
8326 * to make an event void like vacant
8328 event_data->PortEntry[i].PortStatus |=
8329 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8332 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8333 _scsih_pcie_device_remove_by_handle(ioc, handle);
8340 * _scsih_pcie_device_status_change_event_debug - debug for device event
8342 * @event_data: event data payload
8346 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8347 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8349 char *reason_str = NULL;
8351 switch (event_data->ReasonCode) {
8352 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8353 reason_str = "smart data";
8355 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8356 reason_str = "unsupported device discovered";
8358 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8359 reason_str = "internal device reset";
8361 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8362 reason_str = "internal task abort";
8364 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8365 reason_str = "internal task abort set";
8367 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8368 reason_str = "internal clear task set";
8370 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8371 reason_str = "internal query task";
8373 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8374 reason_str = "device init failure";
8376 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8377 reason_str = "internal device reset complete";
8379 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8380 reason_str = "internal task abort complete";
8382 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8383 reason_str = "internal async notification";
8385 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8386 reason_str = "pcie hot reset failed";
8389 reason_str = "unknown reason";
8393 ioc_info(ioc, "PCIE device status change: (%s)\n"
8394 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8395 reason_str, le16_to_cpu(event_data->DevHandle),
8396 (u64)le64_to_cpu(event_data->WWID),
8397 le16_to_cpu(event_data->TaskTag));
8398 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8399 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8400 event_data->ASC, event_data->ASCQ);
8405 * _scsih_pcie_device_status_change_event - handle device status
8407 * @ioc: per adapter object
8408 * @fw_event: The fw_event_work object
8412 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8413 struct fw_event_work *fw_event)
8415 struct MPT3SAS_TARGET *target_priv_data;
8416 struct _pcie_device *pcie_device;
8418 unsigned long flags;
8419 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8420 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8421 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8422 _scsih_pcie_device_status_change_event_debug(ioc,
8425 if (event_data->ReasonCode !=
8426 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8427 event_data->ReasonCode !=
8428 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8431 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8432 wwid = le64_to_cpu(event_data->WWID);
8433 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8435 if (!pcie_device || !pcie_device->starget)
8438 target_priv_data = pcie_device->starget->hostdata;
8439 if (!target_priv_data)
8442 if (event_data->ReasonCode ==
8443 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8444 target_priv_data->tm_busy = 1;
8446 target_priv_data->tm_busy = 0;
8449 pcie_device_put(pcie_device);
8451 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8455 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8457 * @ioc: per adapter object
8458 * @event_data: event data payload
8462 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8463 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8465 char *reason_str = NULL;
8467 switch (event_data->ReasonCode) {
8468 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8469 reason_str = "enclosure add";
8471 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8472 reason_str = "enclosure remove";
8475 reason_str = "unknown reason";
8479 ioc_info(ioc, "enclosure status change: (%s)\n"
8480 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8482 le16_to_cpu(event_data->EnclosureHandle),
8483 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8484 le16_to_cpu(event_data->StartSlot));
8488 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8489 * @ioc: per adapter object
8490 * @fw_event: The fw_event_work object
8494 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8495 struct fw_event_work *fw_event)
8497 Mpi2ConfigReply_t mpi_reply;
8498 struct _enclosure_node *enclosure_dev = NULL;
8499 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8500 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8502 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8504 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8505 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8506 (Mpi2EventDataSasEnclDevStatusChange_t *)
8507 fw_event->event_data);
8508 if (ioc->shost_recovery)
8511 if (enclosure_handle)
8513 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8515 switch (event_data->ReasonCode) {
8516 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8517 if (!enclosure_dev) {
8519 kzalloc(sizeof(struct _enclosure_node),
8521 if (!enclosure_dev) {
8522 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8523 __FILE__, __LINE__, __func__);
8526 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8527 &enclosure_dev->pg0,
8528 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8531 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8532 MPI2_IOCSTATUS_MASK)) {
8533 kfree(enclosure_dev);
8537 list_add_tail(&enclosure_dev->list,
8538 &ioc->enclosure_list);
8541 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8542 if (enclosure_dev) {
8543 list_del(&enclosure_dev->list);
8544 kfree(enclosure_dev);
8553 * _scsih_sas_broadcast_primitive_event - handle broadcast events
8554 * @ioc: per adapter object
8555 * @fw_event: The fw_event_work object
8559 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8560 struct fw_event_work *fw_event)
8562 struct scsi_cmnd *scmd;
8563 struct scsi_device *sdev;
8564 struct scsiio_tracker *st;
8567 struct MPT3SAS_DEVICE *sas_device_priv_data;
8568 u32 termination_count;
8570 Mpi2SCSITaskManagementReply_t *mpi_reply;
8571 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8572 (Mpi2EventDataSasBroadcastPrimitive_t *)
8573 fw_event->event_data;
8575 unsigned long flags;
8578 u8 task_abort_retries;
8580 mutex_lock(&ioc->tm_cmds.mutex);
8581 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8582 __func__, event_data->PhyNum, event_data->PortWidth);
8584 _scsih_block_io_all_device(ioc);
8586 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8587 mpi_reply = ioc->tm_cmds.reply;
8588 broadcast_aen_retry:
8590 /* sanity checks for retrying this loop */
8591 if (max_retries++ == 5) {
8592 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8594 } else if (max_retries > 1)
8596 ioc_info(ioc, "%s: %d retry\n",
8597 __func__, max_retries - 1));
8599 termination_count = 0;
8601 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8602 if (ioc->shost_recovery)
8604 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8607 st = scsi_cmd_priv(scmd);
8608 sdev = scmd->device;
8609 sas_device_priv_data = sdev->hostdata;
8610 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8612 /* skip hidden raid components */
8613 if (sas_device_priv_data->sas_target->flags &
8614 MPT_TARGET_FLAGS_RAID_COMPONENT)
8617 if (sas_device_priv_data->sas_target->flags &
8618 MPT_TARGET_FLAGS_VOLUME)
8620 /* skip PCIe devices */
8621 if (sas_device_priv_data->sas_target->flags &
8622 MPT_TARGET_FLAGS_PCIE_DEVICE)
8625 handle = sas_device_priv_data->sas_target->handle;
8626 lun = sas_device_priv_data->lun;
8629 if (ioc->shost_recovery)
8632 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8633 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8634 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8635 st->msix_io, 30, 0);
8637 sdev_printk(KERN_WARNING, sdev,
8638 "mpt3sas_scsih_issue_tm: FAILED when sending "
8639 "QUERY_TASK: scmd(%p)\n", scmd);
8640 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8641 goto broadcast_aen_retry;
8643 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8644 & MPI2_IOCSTATUS_MASK;
8645 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8646 sdev_printk(KERN_WARNING, sdev,
8647 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8649 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8650 goto broadcast_aen_retry;
8653 /* see if IO is still owned by IOC and target */
8654 if (mpi_reply->ResponseCode ==
8655 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8656 mpi_reply->ResponseCode ==
8657 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8658 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8661 task_abort_retries = 0;
8663 if (task_abort_retries++ == 60) {
8665 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8667 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8668 goto broadcast_aen_retry;
8671 if (ioc->shost_recovery)
8674 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8675 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8676 st->smid, st->msix_io, 30, 0);
8677 if (r == FAILED || st->cb_idx != 0xFF) {
8678 sdev_printk(KERN_WARNING, sdev,
8679 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8680 "scmd(%p)\n", scmd);
8684 if (task_abort_retries > 1)
8685 sdev_printk(KERN_WARNING, sdev,
8686 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8688 task_abort_retries - 1, scmd);
8690 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8691 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8694 if (ioc->broadcast_aen_pending) {
8697 "%s: loop back due to pending AEN\n",
8699 ioc->broadcast_aen_pending = 0;
8700 goto broadcast_aen_retry;
8704 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8708 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8709 __func__, query_count, termination_count));
8711 ioc->broadcast_aen_busy = 0;
8712 if (!ioc->shost_recovery)
8713 _scsih_ublock_io_all_device(ioc);
8714 mutex_unlock(&ioc->tm_cmds.mutex);
8718 * _scsih_sas_discovery_event - handle discovery events
8719 * @ioc: per adapter object
8720 * @fw_event: The fw_event_work object
8724 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8725 struct fw_event_work *fw_event)
8727 Mpi2EventDataSasDiscovery_t *event_data =
8728 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8730 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8731 ioc_info(ioc, "discovery event: (%s)",
8732 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8734 if (event_data->DiscoveryStatus)
8735 pr_cont("discovery_status(0x%08x)",
8736 le32_to_cpu(event_data->DiscoveryStatus));
8740 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8741 !ioc->sas_hba.num_phys) {
8742 if (disable_discovery > 0 && ioc->shost_recovery) {
8743 /* Wait for the reset to complete */
8744 while (ioc->shost_recovery)
8747 _scsih_sas_host_add(ioc);
8752 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8754 * @ioc: per adapter object
8755 * @fw_event: The fw_event_work object
8759 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8760 struct fw_event_work *fw_event)
8762 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8763 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8765 switch (event_data->ReasonCode) {
8766 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8767 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8768 le16_to_cpu(event_data->DevHandle),
8769 (u64)le64_to_cpu(event_data->SASAddress),
8770 event_data->PhysicalPort);
8772 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8773 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8774 le16_to_cpu(event_data->DevHandle),
8775 (u64)le64_to_cpu(event_data->SASAddress),
8776 event_data->PhysicalPort);
8784 * _scsih_pcie_enumeration_event - handle enumeration events
8785 * @ioc: per adapter object
8786 * @fw_event: The fw_event_work object
8790 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8791 struct fw_event_work *fw_event)
8793 Mpi26EventDataPCIeEnumeration_t *event_data =
8794 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8796 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8799 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8800 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8801 "started" : "completed",
8803 if (event_data->EnumerationStatus)
8804 pr_cont("enumeration_status(0x%08x)",
8805 le32_to_cpu(event_data->EnumerationStatus));
8810 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8811 * @ioc: per adapter object
8812 * @handle: device handle for physical disk
8813 * @phys_disk_num: physical disk number
8815 * Return: 0 for success, else failure.
8818 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8820 Mpi2RaidActionRequest_t *mpi_request;
8821 Mpi2RaidActionReply_t *mpi_reply;
8828 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8831 mutex_lock(&ioc->scsih_cmds.mutex);
8833 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8834 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8838 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8840 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8842 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8843 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8848 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8849 ioc->scsih_cmds.smid = smid;
8850 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8852 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8853 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8854 mpi_request->PhysDiskNum = phys_disk_num;
8857 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8858 handle, phys_disk_num));
8860 init_completion(&ioc->scsih_cmds.done);
8861 ioc->put_smid_default(ioc, smid);
8862 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
8864 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8865 mpt3sas_check_cmd_timeout(ioc,
8866 ioc->scsih_cmds.status, mpi_request,
8867 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8872 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8874 mpi_reply = ioc->scsih_cmds.reply;
8875 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8876 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8877 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
8880 ioc_status &= MPI2_IOCSTATUS_MASK;
8881 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8883 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8884 ioc_status, log_info));
8888 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
8892 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8893 mutex_unlock(&ioc->scsih_cmds.mutex);
8896 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8901 * _scsih_reprobe_lun - reprobing lun
8902 * @sdev: scsi device struct
8903 * @no_uld_attach: sdev->no_uld_attach flag setting
8907 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8909 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8910 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8911 sdev->no_uld_attach ? "hiding" : "exposing");
8912 WARN_ON(scsi_device_reprobe(sdev));
8916 * _scsih_sas_volume_add - add new volume
8917 * @ioc: per adapter object
8918 * @element: IR config element data
8922 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8923 Mpi2EventIrConfigElement_t *element)
8925 struct _raid_device *raid_device;
8926 unsigned long flags;
8928 u16 handle = le16_to_cpu(element->VolDevHandle);
8931 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8933 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8934 __FILE__, __LINE__, __func__);
8938 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8939 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8940 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8945 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8947 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8948 __FILE__, __LINE__, __func__);
8952 raid_device->id = ioc->sas_id++;
8953 raid_device->channel = RAID_CHANNEL;
8954 raid_device->handle = handle;
8955 raid_device->wwid = wwid;
8956 _scsih_raid_device_add(ioc, raid_device);
8957 if (!ioc->wait_for_discovery_to_complete) {
8958 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8959 raid_device->id, 0);
8961 _scsih_raid_device_remove(ioc, raid_device);
8963 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8964 _scsih_determine_boot_device(ioc, raid_device, 1);
8965 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8970 * _scsih_sas_volume_delete - delete volume
8971 * @ioc: per adapter object
8972 * @handle: volume device handle
8976 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8978 struct _raid_device *raid_device;
8979 unsigned long flags;
8980 struct MPT3SAS_TARGET *sas_target_priv_data;
8981 struct scsi_target *starget = NULL;
8983 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8984 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8986 if (raid_device->starget) {
8987 starget = raid_device->starget;
8988 sas_target_priv_data = starget->hostdata;
8989 sas_target_priv_data->deleted = 1;
8991 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
8992 raid_device->handle, (u64)raid_device->wwid);
8993 list_del(&raid_device->list);
8996 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8998 scsi_remove_target(&starget->dev);
9002 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9003 * @ioc: per adapter object
9004 * @element: IR config element data
9008 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9009 Mpi2EventIrConfigElement_t *element)
9011 struct _sas_device *sas_device;
9012 struct scsi_target *starget = NULL;
9013 struct MPT3SAS_TARGET *sas_target_priv_data;
9014 unsigned long flags;
9015 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9017 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9018 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9020 sas_device->volume_handle = 0;
9021 sas_device->volume_wwid = 0;
9022 clear_bit(handle, ioc->pd_handles);
9023 if (sas_device->starget && sas_device->starget->hostdata) {
9024 starget = sas_device->starget;
9025 sas_target_priv_data = starget->hostdata;
9026 sas_target_priv_data->flags &=
9027 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9030 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9034 /* exposing raid component */
9036 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9038 sas_device_put(sas_device);
9042 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9043 * @ioc: per adapter object
9044 * @element: IR config element data
9048 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9049 Mpi2EventIrConfigElement_t *element)
9051 struct _sas_device *sas_device;
9052 struct scsi_target *starget = NULL;
9053 struct MPT3SAS_TARGET *sas_target_priv_data;
9054 unsigned long flags;
9055 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9056 u16 volume_handle = 0;
9057 u64 volume_wwid = 0;
9059 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9061 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9064 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9065 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9067 set_bit(handle, ioc->pd_handles);
9068 if (sas_device->starget && sas_device->starget->hostdata) {
9069 starget = sas_device->starget;
9070 sas_target_priv_data = starget->hostdata;
9071 sas_target_priv_data->flags |=
9072 MPT_TARGET_FLAGS_RAID_COMPONENT;
9073 sas_device->volume_handle = volume_handle;
9074 sas_device->volume_wwid = volume_wwid;
9077 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9081 /* hiding raid component */
9082 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9085 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9087 sas_device_put(sas_device);
9091 * _scsih_sas_pd_delete - delete pd component
9092 * @ioc: per adapter object
9093 * @element: IR config element data
9097 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9098 Mpi2EventIrConfigElement_t *element)
9100 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9102 _scsih_device_remove_by_handle(ioc, handle);
9106 * _scsih_sas_pd_add - remove pd component
9107 * @ioc: per adapter object
9108 * @element: IR config element data
9112 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9113 Mpi2EventIrConfigElement_t *element)
9115 struct _sas_device *sas_device;
9116 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9117 Mpi2ConfigReply_t mpi_reply;
9118 Mpi2SasDevicePage0_t sas_device_pg0;
9123 set_bit(handle, ioc->pd_handles);
9125 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9127 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9128 sas_device_put(sas_device);
9132 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9133 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9134 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9135 __FILE__, __LINE__, __func__);
9139 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9140 MPI2_IOCSTATUS_MASK;
9141 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9142 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9143 __FILE__, __LINE__, __func__);
9147 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9148 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9149 mpt3sas_transport_update_links(ioc, sas_address, handle,
9150 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9151 mpt3sas_get_port_by_id(ioc,
9152 sas_device_pg0.PhysicalPort, 0));
9154 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9155 _scsih_add_device(ioc, handle, 0, 1);
9159 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9160 * @ioc: per adapter object
9161 * @event_data: event data payload
9165 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9166 Mpi2EventDataIrConfigChangeList_t *event_data)
9168 Mpi2EventIrConfigElement_t *element;
9171 char *reason_str = NULL, *element_str = NULL;
9173 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9175 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9176 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9177 "foreign" : "native",
9178 event_data->NumElements);
9179 for (i = 0; i < event_data->NumElements; i++, element++) {
9180 switch (element->ReasonCode) {
9181 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9184 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9185 reason_str = "remove";
9187 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9188 reason_str = "no change";
9190 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9191 reason_str = "hide";
9193 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9194 reason_str = "unhide";
9196 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9197 reason_str = "volume_created";
9199 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9200 reason_str = "volume_deleted";
9202 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9203 reason_str = "pd_created";
9205 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9206 reason_str = "pd_deleted";
9209 reason_str = "unknown reason";
9212 element_type = le16_to_cpu(element->ElementFlags) &
9213 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9214 switch (element_type) {
9215 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9216 element_str = "volume";
9218 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9219 element_str = "phys disk";
9221 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9222 element_str = "hot spare";
9225 element_str = "unknown element";
9228 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9229 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9230 reason_str, le16_to_cpu(element->VolDevHandle),
9231 le16_to_cpu(element->PhysDiskDevHandle),
9232 element->PhysDiskNum);
9237 * _scsih_sas_ir_config_change_event - handle ir configuration change events
9238 * @ioc: per adapter object
9239 * @fw_event: The fw_event_work object
9243 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9244 struct fw_event_work *fw_event)
9246 Mpi2EventIrConfigElement_t *element;
9249 Mpi2EventDataIrConfigChangeList_t *event_data =
9250 (Mpi2EventDataIrConfigChangeList_t *)
9251 fw_event->event_data;
9253 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9254 (!ioc->hide_ir_msg))
9255 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9257 foreign_config = (le32_to_cpu(event_data->Flags) &
9258 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9260 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9261 if (ioc->shost_recovery &&
9262 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9263 for (i = 0; i < event_data->NumElements; i++, element++) {
9264 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9265 _scsih_ir_fastpath(ioc,
9266 le16_to_cpu(element->PhysDiskDevHandle),
9267 element->PhysDiskNum);
9272 for (i = 0; i < event_data->NumElements; i++, element++) {
9274 switch (element->ReasonCode) {
9275 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9276 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9277 if (!foreign_config)
9278 _scsih_sas_volume_add(ioc, element);
9280 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9281 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9282 if (!foreign_config)
9283 _scsih_sas_volume_delete(ioc,
9284 le16_to_cpu(element->VolDevHandle));
9286 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9287 if (!ioc->is_warpdrive)
9288 _scsih_sas_pd_hide(ioc, element);
9290 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9291 if (!ioc->is_warpdrive)
9292 _scsih_sas_pd_expose(ioc, element);
9294 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9295 if (!ioc->is_warpdrive)
9296 _scsih_sas_pd_add(ioc, element);
9298 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9299 if (!ioc->is_warpdrive)
9300 _scsih_sas_pd_delete(ioc, element);
9307 * _scsih_sas_ir_volume_event - IR volume event
9308 * @ioc: per adapter object
9309 * @fw_event: The fw_event_work object
9313 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9314 struct fw_event_work *fw_event)
9317 unsigned long flags;
9318 struct _raid_device *raid_device;
9322 Mpi2EventDataIrVolume_t *event_data =
9323 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
9325 if (ioc->shost_recovery)
9328 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9331 handle = le16_to_cpu(event_data->VolDevHandle);
9332 state = le32_to_cpu(event_data->NewValue);
9333 if (!ioc->hide_ir_msg)
9335 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9337 le32_to_cpu(event_data->PreviousValue),
9340 case MPI2_RAID_VOL_STATE_MISSING:
9341 case MPI2_RAID_VOL_STATE_FAILED:
9342 _scsih_sas_volume_delete(ioc, handle);
9345 case MPI2_RAID_VOL_STATE_ONLINE:
9346 case MPI2_RAID_VOL_STATE_DEGRADED:
9347 case MPI2_RAID_VOL_STATE_OPTIMAL:
9349 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9350 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9351 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9356 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9358 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9359 __FILE__, __LINE__, __func__);
9363 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9365 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9366 __FILE__, __LINE__, __func__);
9370 raid_device->id = ioc->sas_id++;
9371 raid_device->channel = RAID_CHANNEL;
9372 raid_device->handle = handle;
9373 raid_device->wwid = wwid;
9374 _scsih_raid_device_add(ioc, raid_device);
9375 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9376 raid_device->id, 0);
9378 _scsih_raid_device_remove(ioc, raid_device);
9381 case MPI2_RAID_VOL_STATE_INITIALIZING:
9388 * _scsih_sas_ir_physical_disk_event - PD event
9389 * @ioc: per adapter object
9390 * @fw_event: The fw_event_work object
9394 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9395 struct fw_event_work *fw_event)
9397 u16 handle, parent_handle;
9399 struct _sas_device *sas_device;
9400 Mpi2ConfigReply_t mpi_reply;
9401 Mpi2SasDevicePage0_t sas_device_pg0;
9403 Mpi2EventDataIrPhysicalDisk_t *event_data =
9404 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
9407 if (ioc->shost_recovery)
9410 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9413 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9414 state = le32_to_cpu(event_data->NewValue);
9416 if (!ioc->hide_ir_msg)
9418 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9420 le32_to_cpu(event_data->PreviousValue),
9424 case MPI2_RAID_PD_STATE_ONLINE:
9425 case MPI2_RAID_PD_STATE_DEGRADED:
9426 case MPI2_RAID_PD_STATE_REBUILDING:
9427 case MPI2_RAID_PD_STATE_OPTIMAL:
9428 case MPI2_RAID_PD_STATE_HOT_SPARE:
9430 if (!ioc->is_warpdrive)
9431 set_bit(handle, ioc->pd_handles);
9433 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9435 sas_device_put(sas_device);
9439 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9440 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9442 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9443 __FILE__, __LINE__, __func__);
9447 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9448 MPI2_IOCSTATUS_MASK;
9449 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9450 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9451 __FILE__, __LINE__, __func__);
9455 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9456 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9457 mpt3sas_transport_update_links(ioc, sas_address, handle,
9458 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9459 mpt3sas_get_port_by_id(ioc,
9460 sas_device_pg0.PhysicalPort, 0));
9462 _scsih_add_device(ioc, handle, 0, 1);
9466 case MPI2_RAID_PD_STATE_OFFLINE:
9467 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9468 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
9475 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9476 * @ioc: per adapter object
9477 * @event_data: event data payload
9481 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9482 Mpi2EventDataIrOperationStatus_t *event_data)
9484 char *reason_str = NULL;
9486 switch (event_data->RAIDOperation) {
9487 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9488 reason_str = "resync";
9490 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9491 reason_str = "online capacity expansion";
9493 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9494 reason_str = "consistency check";
9496 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9497 reason_str = "background init";
9499 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9500 reason_str = "make data consistent";
9507 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9509 le16_to_cpu(event_data->VolDevHandle),
9510 event_data->PercentComplete);
9514 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9515 * @ioc: per adapter object
9516 * @fw_event: The fw_event_work object
9520 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9521 struct fw_event_work *fw_event)
9523 Mpi2EventDataIrOperationStatus_t *event_data =
9524 (Mpi2EventDataIrOperationStatus_t *)
9525 fw_event->event_data;
9526 static struct _raid_device *raid_device;
9527 unsigned long flags;
9530 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9531 (!ioc->hide_ir_msg))
9532 _scsih_sas_ir_operation_status_event_debug(ioc,
9535 /* code added for raid transport support */
9536 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9538 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9539 handle = le16_to_cpu(event_data->VolDevHandle);
9540 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9542 raid_device->percent_complete =
9543 event_data->PercentComplete;
9544 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9549 * _scsih_prep_device_scan - initialize parameters prior to device scan
9550 * @ioc: per adapter object
9552 * Set the deleted flag prior to device scan. If the device is found during
9553 * the scan, then we clear the deleted flag.
9556 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9558 struct MPT3SAS_DEVICE *sas_device_priv_data;
9559 struct scsi_device *sdev;
9561 shost_for_each_device(sdev, ioc->shost) {
9562 sas_device_priv_data = sdev->hostdata;
9563 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9564 sas_device_priv_data->sas_target->deleted = 1;
9569 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
9570 * @ioc: per adapter object
9571 * @sas_device_pg0: SAS Device page 0
9573 * After host reset, find out whether devices are still responding.
9574 * Used in _scsih_remove_unresponsive_sas_devices.
9577 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9578 Mpi2SasDevicePage0_t *sas_device_pg0)
9580 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9581 struct scsi_target *starget;
9582 struct _sas_device *sas_device = NULL;
9583 struct _enclosure_node *enclosure_dev = NULL;
9584 unsigned long flags;
9585 struct hba_port *port = mpt3sas_get_port_by_id(
9586 ioc, sas_device_pg0->PhysicalPort, 0);
9588 if (sas_device_pg0->EnclosureHandle) {
9590 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9591 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9592 if (enclosure_dev == NULL)
9593 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9594 sas_device_pg0->EnclosureHandle);
9596 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9597 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
9598 if (sas_device->sas_address != le64_to_cpu(
9599 sas_device_pg0->SASAddress))
9601 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9603 if (sas_device->port != port)
9605 sas_device->responding = 1;
9606 starget = sas_device->starget;
9607 if (starget && starget->hostdata) {
9608 sas_target_priv_data = starget->hostdata;
9609 sas_target_priv_data->tm_busy = 0;
9610 sas_target_priv_data->deleted = 0;
9612 sas_target_priv_data = NULL;
9614 starget_printk(KERN_INFO, starget,
9615 "handle(0x%04x), sas_addr(0x%016llx)\n",
9616 le16_to_cpu(sas_device_pg0->DevHandle),
9617 (unsigned long long)
9618 sas_device->sas_address);
9620 if (sas_device->enclosure_handle != 0)
9621 starget_printk(KERN_INFO, starget,
9622 "enclosure logical id(0x%016llx), slot(%d)\n",
9623 (unsigned long long)
9624 sas_device->enclosure_logical_id,
9627 if (le16_to_cpu(sas_device_pg0->Flags) &
9628 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9629 sas_device->enclosure_level =
9630 sas_device_pg0->EnclosureLevel;
9631 memcpy(&sas_device->connector_name[0],
9632 &sas_device_pg0->ConnectorName[0], 4);
9634 sas_device->enclosure_level = 0;
9635 sas_device->connector_name[0] = '\0';
9638 sas_device->enclosure_handle =
9639 le16_to_cpu(sas_device_pg0->EnclosureHandle);
9640 sas_device->is_chassis_slot_valid = 0;
9641 if (enclosure_dev) {
9642 sas_device->enclosure_logical_id = le64_to_cpu(
9643 enclosure_dev->pg0.EnclosureLogicalID);
9644 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9645 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9646 sas_device->is_chassis_slot_valid = 1;
9647 sas_device->chassis_slot =
9648 enclosure_dev->pg0.ChassisSlot;
9652 if (sas_device->handle == le16_to_cpu(
9653 sas_device_pg0->DevHandle))
9655 pr_info("\thandle changed from(0x%04x)!!!\n",
9656 sas_device->handle);
9657 sas_device->handle = le16_to_cpu(
9658 sas_device_pg0->DevHandle);
9659 if (sas_target_priv_data)
9660 sas_target_priv_data->handle =
9661 le16_to_cpu(sas_device_pg0->DevHandle);
9665 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9669 * _scsih_create_enclosure_list_after_reset - Free Existing list,
9670 * And create enclosure list by scanning all Enclosure Page(0)s
9671 * @ioc: per adapter object
9674 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9676 struct _enclosure_node *enclosure_dev;
9677 Mpi2ConfigReply_t mpi_reply;
9678 u16 enclosure_handle;
9681 /* Free existing enclosure list */
9682 mpt3sas_free_enclosure_list(ioc);
9684 /* Re constructing enclosure list after reset*/
9685 enclosure_handle = 0xFFFF;
9688 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9689 if (!enclosure_dev) {
9690 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9691 __FILE__, __LINE__, __func__);
9694 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9695 &enclosure_dev->pg0,
9696 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9699 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9700 MPI2_IOCSTATUS_MASK)) {
9701 kfree(enclosure_dev);
9704 list_add_tail(&enclosure_dev->list,
9705 &ioc->enclosure_list);
9707 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9712 * _scsih_search_responding_sas_devices -
9713 * @ioc: per adapter object
9715 * After host reset, find out whether devices are still responding.
9719 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9721 Mpi2SasDevicePage0_t sas_device_pg0;
9722 Mpi2ConfigReply_t mpi_reply;
9727 ioc_info(ioc, "search for end-devices: start\n");
9729 if (list_empty(&ioc->sas_device_list))
9733 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9734 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9736 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9737 MPI2_IOCSTATUS_MASK;
9738 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9740 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9741 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9742 if (!(_scsih_is_end_device(device_info)))
9744 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9748 ioc_info(ioc, "search for end-devices: complete\n");
9752 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9753 * @ioc: per adapter object
9754 * @pcie_device_pg0: PCIe Device page 0
9756 * After host reset, find out whether devices are still responding.
9757 * Used in _scsih_remove_unresponding_devices.
9760 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9761 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9763 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9764 struct scsi_target *starget;
9765 struct _pcie_device *pcie_device;
9766 unsigned long flags;
9768 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9769 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
9770 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9771 && (pcie_device->slot == le16_to_cpu(
9772 pcie_device_pg0->Slot))) {
9773 pcie_device->access_status =
9774 pcie_device_pg0->AccessStatus;
9775 pcie_device->responding = 1;
9776 starget = pcie_device->starget;
9777 if (starget && starget->hostdata) {
9778 sas_target_priv_data = starget->hostdata;
9779 sas_target_priv_data->tm_busy = 0;
9780 sas_target_priv_data->deleted = 0;
9782 sas_target_priv_data = NULL;
9784 starget_printk(KERN_INFO, starget,
9785 "handle(0x%04x), wwid(0x%016llx) ",
9786 pcie_device->handle,
9787 (unsigned long long)pcie_device->wwid);
9788 if (pcie_device->enclosure_handle != 0)
9789 starget_printk(KERN_INFO, starget,
9790 "enclosure logical id(0x%016llx), "
9792 (unsigned long long)
9793 pcie_device->enclosure_logical_id,
9797 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9798 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9799 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9800 pcie_device->enclosure_level =
9801 pcie_device_pg0->EnclosureLevel;
9802 memcpy(&pcie_device->connector_name[0],
9803 &pcie_device_pg0->ConnectorName[0], 4);
9805 pcie_device->enclosure_level = 0;
9806 pcie_device->connector_name[0] = '\0';
9809 if (pcie_device->handle == le16_to_cpu(
9810 pcie_device_pg0->DevHandle))
9812 pr_info("\thandle changed from(0x%04x)!!!\n",
9813 pcie_device->handle);
9814 pcie_device->handle = le16_to_cpu(
9815 pcie_device_pg0->DevHandle);
9816 if (sas_target_priv_data)
9817 sas_target_priv_data->handle =
9818 le16_to_cpu(pcie_device_pg0->DevHandle);
9824 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9828 * _scsih_search_responding_pcie_devices -
9829 * @ioc: per adapter object
9831 * After host reset, find out whether devices are still responding.
9835 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9837 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9838 Mpi2ConfigReply_t mpi_reply;
9843 ioc_info(ioc, "search for end-devices: start\n");
9845 if (list_empty(&ioc->pcie_device_list))
9849 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9850 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9852 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9853 MPI2_IOCSTATUS_MASK;
9854 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9855 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9856 __func__, ioc_status,
9857 le32_to_cpu(mpi_reply.IOCLogInfo));
9860 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9861 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9862 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9864 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9867 ioc_info(ioc, "search for PCIe end-devices: complete\n");
9871 * _scsih_mark_responding_raid_device - mark a raid_device as responding
9872 * @ioc: per adapter object
9873 * @wwid: world wide identifier for raid volume
9874 * @handle: device handle
9876 * After host reset, find out whether devices are still responding.
9877 * Used in _scsih_remove_unresponsive_raid_devices.
9880 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
9883 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9884 struct scsi_target *starget;
9885 struct _raid_device *raid_device;
9886 unsigned long flags;
9888 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9889 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
9890 if (raid_device->wwid == wwid && raid_device->starget) {
9891 starget = raid_device->starget;
9892 if (starget && starget->hostdata) {
9893 sas_target_priv_data = starget->hostdata;
9894 sas_target_priv_data->deleted = 0;
9896 sas_target_priv_data = NULL;
9897 raid_device->responding = 1;
9898 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9899 starget_printk(KERN_INFO, raid_device->starget,
9900 "handle(0x%04x), wwid(0x%016llx)\n", handle,
9901 (unsigned long long)raid_device->wwid);
9904 * WARPDRIVE: The handles of the PDs might have changed
9905 * across the host reset so re-initialize the
9906 * required data for Direct IO
9908 mpt3sas_init_warpdrive_properties(ioc, raid_device);
9909 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9910 if (raid_device->handle == handle) {
9911 spin_unlock_irqrestore(&ioc->raid_device_lock,
9915 pr_info("\thandle changed from(0x%04x)!!!\n",
9916 raid_device->handle);
9917 raid_device->handle = handle;
9918 if (sas_target_priv_data)
9919 sas_target_priv_data->handle = handle;
9920 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9924 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9928 * _scsih_search_responding_raid_devices -
9929 * @ioc: per adapter object
9931 * After host reset, find out whether devices are still responding.
9935 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9937 Mpi2RaidVolPage1_t volume_pg1;
9938 Mpi2RaidVolPage0_t volume_pg0;
9939 Mpi2RaidPhysDiskPage0_t pd_pg0;
9940 Mpi2ConfigReply_t mpi_reply;
9945 if (!ioc->ir_firmware)
9948 ioc_info(ioc, "search for raid volumes: start\n");
9950 if (list_empty(&ioc->raid_device_list))
9954 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9955 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9956 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9957 MPI2_IOCSTATUS_MASK;
9958 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9960 handle = le16_to_cpu(volume_pg1.DevHandle);
9962 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9963 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9964 sizeof(Mpi2RaidVolPage0_t)))
9967 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9968 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9969 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
9970 _scsih_mark_responding_raid_device(ioc,
9971 le64_to_cpu(volume_pg1.WWID), handle);
9974 /* refresh the pd_handles */
9975 if (!ioc->is_warpdrive) {
9976 phys_disk_num = 0xFF;
9977 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
9978 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9979 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9981 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9982 MPI2_IOCSTATUS_MASK;
9983 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9985 phys_disk_num = pd_pg0.PhysDiskNum;
9986 handle = le16_to_cpu(pd_pg0.DevHandle);
9987 set_bit(handle, ioc->pd_handles);
9991 ioc_info(ioc, "search for responding raid volumes: complete\n");
9995 * _scsih_mark_responding_expander - mark a expander as responding
9996 * @ioc: per adapter object
9997 * @expander_pg0:SAS Expander Config Page0
9999 * After host reset, find out whether devices are still responding.
10000 * Used in _scsih_remove_unresponsive_expanders.
10003 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10004 Mpi2ExpanderPage0_t *expander_pg0)
10006 struct _sas_node *sas_expander = NULL;
10007 unsigned long flags;
10009 struct _enclosure_node *enclosure_dev = NULL;
10010 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10011 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10012 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10013 struct hba_port *port = mpt3sas_get_port_by_id(
10014 ioc, expander_pg0->PhysicalPort, 0);
10016 if (enclosure_handle)
10018 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10021 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10022 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10023 if (sas_expander->sas_address != sas_address)
10025 if (sas_expander->port != port)
10027 sas_expander->responding = 1;
10029 if (enclosure_dev) {
10030 sas_expander->enclosure_logical_id =
10031 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10032 sas_expander->enclosure_handle =
10033 le16_to_cpu(expander_pg0->EnclosureHandle);
10036 if (sas_expander->handle == handle)
10038 pr_info("\texpander(0x%016llx): handle changed" \
10039 " from(0x%04x) to (0x%04x)!!!\n",
10040 (unsigned long long)sas_expander->sas_address,
10041 sas_expander->handle, handle);
10042 sas_expander->handle = handle;
10043 for (i = 0 ; i < sas_expander->num_phys ; i++)
10044 sas_expander->phy[i].handle = handle;
10048 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10052 * _scsih_search_responding_expanders -
10053 * @ioc: per adapter object
10055 * After host reset, find out whether devices are still responding.
10059 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10061 Mpi2ExpanderPage0_t expander_pg0;
10062 Mpi2ConfigReply_t mpi_reply;
10068 ioc_info(ioc, "search for expanders: start\n");
10070 if (list_empty(&ioc->sas_expander_list))
10074 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10075 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10077 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10078 MPI2_IOCSTATUS_MASK;
10079 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10082 handle = le16_to_cpu(expander_pg0.DevHandle);
10083 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10084 port = expander_pg0.PhysicalPort;
10086 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10087 handle, (unsigned long long)sas_address,
10088 (ioc->multipath_on_hba ?
10089 port : MULTIPATH_DISABLED_PORT_ID));
10090 _scsih_mark_responding_expander(ioc, &expander_pg0);
10094 ioc_info(ioc, "search for expanders: complete\n");
10098 * _scsih_remove_unresponding_devices - removing unresponding devices
10099 * @ioc: per adapter object
10102 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10104 struct _sas_device *sas_device, *sas_device_next;
10105 struct _sas_node *sas_expander, *sas_expander_next;
10106 struct _raid_device *raid_device, *raid_device_next;
10107 struct _pcie_device *pcie_device, *pcie_device_next;
10108 struct list_head tmp_list;
10109 unsigned long flags;
10112 ioc_info(ioc, "removing unresponding devices: start\n");
10114 /* removing unresponding end devices */
10115 ioc_info(ioc, "removing unresponding devices: end-devices\n");
10117 * Iterate, pulling off devices marked as non-responding. We become the
10118 * owner for the reference the list had on any object we prune.
10120 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10121 list_for_each_entry_safe(sas_device, sas_device_next,
10122 &ioc->sas_device_list, list) {
10123 if (!sas_device->responding)
10124 list_move_tail(&sas_device->list, &head);
10126 sas_device->responding = 0;
10128 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10131 * Now, uninitialize and remove the unresponding devices we pruned.
10133 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10134 _scsih_remove_device(ioc, sas_device);
10135 list_del_init(&sas_device->list);
10136 sas_device_put(sas_device);
10139 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
10140 INIT_LIST_HEAD(&head);
10141 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10142 list_for_each_entry_safe(pcie_device, pcie_device_next,
10143 &ioc->pcie_device_list, list) {
10144 if (!pcie_device->responding)
10145 list_move_tail(&pcie_device->list, &head);
10147 pcie_device->responding = 0;
10149 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10151 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10152 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10153 list_del_init(&pcie_device->list);
10154 pcie_device_put(pcie_device);
10157 /* removing unresponding volumes */
10158 if (ioc->ir_firmware) {
10159 ioc_info(ioc, "removing unresponding devices: volumes\n");
10160 list_for_each_entry_safe(raid_device, raid_device_next,
10161 &ioc->raid_device_list, list) {
10162 if (!raid_device->responding)
10163 _scsih_sas_volume_delete(ioc,
10164 raid_device->handle);
10166 raid_device->responding = 0;
10170 /* removing unresponding expanders */
10171 ioc_info(ioc, "removing unresponding devices: expanders\n");
10172 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10173 INIT_LIST_HEAD(&tmp_list);
10174 list_for_each_entry_safe(sas_expander, sas_expander_next,
10175 &ioc->sas_expander_list, list) {
10176 if (!sas_expander->responding)
10177 list_move_tail(&sas_expander->list, &tmp_list);
10179 sas_expander->responding = 0;
10181 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10182 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10184 _scsih_expander_node_remove(ioc, sas_expander);
10187 ioc_info(ioc, "removing unresponding devices: complete\n");
10189 /* unblock devices */
10190 _scsih_ublock_io_all_device(ioc);
10194 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10195 struct _sas_node *sas_expander, u16 handle)
10197 Mpi2ExpanderPage1_t expander_pg1;
10198 Mpi2ConfigReply_t mpi_reply;
10201 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10202 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10203 &expander_pg1, i, handle))) {
10204 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10205 __FILE__, __LINE__, __func__);
10209 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10210 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10211 expander_pg1.NegotiatedLinkRate >> 4,
10212 sas_expander->port);
10217 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10218 * @ioc: per adapter object
10221 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10223 Mpi2ExpanderPage0_t expander_pg0;
10224 Mpi2SasDevicePage0_t sas_device_pg0;
10225 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10226 Mpi2RaidVolPage1_t *volume_pg1;
10227 Mpi2RaidVolPage0_t *volume_pg0;
10228 Mpi2RaidPhysDiskPage0_t pd_pg0;
10229 Mpi2EventIrConfigElement_t element;
10230 Mpi2ConfigReply_t mpi_reply;
10231 u8 phys_disk_num, port_id;
10233 u16 handle, parent_handle;
10235 struct _sas_device *sas_device;
10236 struct _pcie_device *pcie_device;
10237 struct _sas_node *expander_device;
10238 static struct _raid_device *raid_device;
10240 unsigned long flags;
10242 volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
10246 volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
10252 ioc_info(ioc, "scan devices: start\n");
10254 _scsih_sas_host_refresh(ioc);
10256 ioc_info(ioc, "\tscan devices: expanders start\n");
10260 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10261 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10262 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10263 MPI2_IOCSTATUS_MASK;
10264 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10265 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10266 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10269 handle = le16_to_cpu(expander_pg0.DevHandle);
10270 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10271 port_id = expander_pg0.PhysicalPort;
10272 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10273 ioc, le64_to_cpu(expander_pg0.SASAddress),
10274 mpt3sas_get_port_by_id(ioc, port_id, 0));
10275 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10276 if (expander_device)
10277 _scsih_refresh_expander_links(ioc, expander_device,
10280 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10282 (u64)le64_to_cpu(expander_pg0.SASAddress));
10283 _scsih_expander_add(ioc, handle);
10284 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10286 (u64)le64_to_cpu(expander_pg0.SASAddress));
10290 ioc_info(ioc, "\tscan devices: expanders complete\n");
10292 if (!ioc->ir_firmware)
10295 ioc_info(ioc, "\tscan devices: phys disk start\n");
10298 phys_disk_num = 0xFF;
10299 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10300 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10302 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10303 MPI2_IOCSTATUS_MASK;
10304 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10305 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10306 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10309 phys_disk_num = pd_pg0.PhysDiskNum;
10310 handle = le16_to_cpu(pd_pg0.DevHandle);
10311 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10313 sas_device_put(sas_device);
10316 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10317 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10320 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10321 MPI2_IOCSTATUS_MASK;
10322 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10323 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10324 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10327 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10328 if (!_scsih_get_sas_address(ioc, parent_handle,
10330 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10332 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10333 port_id = sas_device_pg0.PhysicalPort;
10334 mpt3sas_transport_update_links(ioc, sas_address,
10335 handle, sas_device_pg0.PhyNum,
10336 MPI2_SAS_NEG_LINK_RATE_1_5,
10337 mpt3sas_get_port_by_id(ioc, port_id, 0));
10338 set_bit(handle, ioc->pd_handles);
10340 /* This will retry adding the end device.
10341 * _scsih_add_device() will decide on retries and
10342 * return "1" when it should be retried
10344 while (_scsih_add_device(ioc, handle, retry_count++,
10348 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10350 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10354 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10356 ioc_info(ioc, "\tscan devices: volumes start\n");
10360 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10361 volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10362 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10363 MPI2_IOCSTATUS_MASK;
10364 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10365 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10366 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10369 handle = le16_to_cpu(volume_pg1->DevHandle);
10370 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10371 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10372 le64_to_cpu(volume_pg1->WWID));
10373 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10376 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10377 volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10378 sizeof(Mpi2RaidVolPage0_t)))
10380 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10381 MPI2_IOCSTATUS_MASK;
10382 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10383 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10384 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10387 if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10388 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10389 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10390 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10391 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10392 element.VolDevHandle = volume_pg1->DevHandle;
10393 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10394 volume_pg1->DevHandle);
10395 _scsih_sas_volume_add(ioc, &element);
10396 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10397 volume_pg1->DevHandle);
10401 ioc_info(ioc, "\tscan devices: volumes complete\n");
10405 ioc_info(ioc, "\tscan devices: end devices start\n");
10409 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10410 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10412 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10413 MPI2_IOCSTATUS_MASK;
10414 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10415 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10416 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10419 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10420 if (!(_scsih_is_end_device(
10421 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10423 port_id = sas_device_pg0.PhysicalPort;
10424 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10425 le64_to_cpu(sas_device_pg0.SASAddress),
10426 mpt3sas_get_port_by_id(ioc, port_id, 0));
10428 sas_device_put(sas_device);
10431 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10432 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10433 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10435 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10436 mpt3sas_transport_update_links(ioc, sas_address, handle,
10437 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10438 mpt3sas_get_port_by_id(ioc, port_id, 0));
10440 /* This will retry adding the end device.
10441 * _scsih_add_device() will decide on retries and
10442 * return "1" when it should be retried
10444 while (_scsih_add_device(ioc, handle, retry_count++,
10448 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10450 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10453 ioc_info(ioc, "\tscan devices: end devices complete\n");
10454 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10458 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10459 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10461 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10462 & MPI2_IOCSTATUS_MASK;
10463 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10464 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10465 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10468 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10469 if (!(_scsih_is_nvme_pciescsi_device(
10470 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10472 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10473 le64_to_cpu(pcie_device_pg0.WWID));
10475 pcie_device_put(pcie_device);
10479 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10480 _scsih_pcie_add_device(ioc, handle);
10482 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10483 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10489 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10490 ioc_info(ioc, "scan devices: complete\n");
10494 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
10495 * @ioc: per adapter object
10497 * The handler for doing any required cleanup or initialization.
10499 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
10501 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10505 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10507 * @ioc: per adapter object
10509 * The handler for doing any required cleanup or initialization.
10512 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10515 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
10516 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10517 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10518 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10519 complete(&ioc->scsih_cmds.done);
10521 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10522 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10523 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10524 complete(&ioc->tm_cmds.done);
10527 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10528 memset(ioc->device_remove_in_progress, 0,
10529 ioc->device_remove_in_progress_sz);
10530 _scsih_fw_event_cleanup_queue(ioc);
10531 _scsih_flush_running_cmds(ioc);
10535 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10536 * @ioc: per adapter object
10538 * The handler for doing any required cleanup or initialization.
10541 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10543 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10544 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
10545 !ioc->sas_hba.num_phys)) {
10546 if (ioc->multipath_on_hba) {
10547 _scsih_sas_port_refresh(ioc);
10548 _scsih_update_vphys_after_reset(ioc);
10550 _scsih_prep_device_scan(ioc);
10551 _scsih_create_enclosure_list_after_reset(ioc);
10552 _scsih_search_responding_sas_devices(ioc);
10553 _scsih_search_responding_pcie_devices(ioc);
10554 _scsih_search_responding_raid_devices(ioc);
10555 _scsih_search_responding_expanders(ioc);
10556 _scsih_error_recovery_delete_devices(ioc);
10561 * _mpt3sas_fw_work - delayed task for processing firmware events
10562 * @ioc: per adapter object
10563 * @fw_event: The fw_event_work object
10567 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10569 ioc->current_event = fw_event;
10570 _scsih_fw_event_del_from_list(ioc, fw_event);
10572 /* the queue is being flushed so ignore this event */
10573 if (ioc->remove_host || ioc->pci_error_recovery) {
10574 fw_event_work_put(fw_event);
10575 ioc->current_event = NULL;
10579 switch (fw_event->event) {
10580 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10581 mpt3sas_process_trigger_data(ioc,
10582 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10583 fw_event->event_data);
10585 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10586 while (scsi_host_in_recovery(ioc->shost) ||
10587 ioc->shost_recovery) {
10589 * If we're unloading or cancelling the work, bail.
10590 * Otherwise, this can become an infinite loop.
10592 if (ioc->remove_host || ioc->fw_events_cleanup)
10596 _scsih_remove_unresponding_devices(ioc);
10597 _scsih_del_dirty_vphy(ioc);
10598 _scsih_del_dirty_port_entries(ioc);
10599 _scsih_scan_for_devices_after_reset(ioc);
10600 _scsih_set_nvme_max_shutdown_latency(ioc);
10602 case MPT3SAS_PORT_ENABLE_COMPLETE:
10603 ioc->start_scan = 0;
10604 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10605 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10608 ioc_info(ioc, "port enable: complete from worker thread\n"));
10610 case MPT3SAS_TURN_ON_PFA_LED:
10611 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10613 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10614 _scsih_sas_topology_change_event(ioc, fw_event);
10616 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10617 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10618 _scsih_sas_device_status_change_event_debug(ioc,
10619 (Mpi2EventDataSasDeviceStatusChange_t *)
10620 fw_event->event_data);
10622 case MPI2_EVENT_SAS_DISCOVERY:
10623 _scsih_sas_discovery_event(ioc, fw_event);
10625 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10626 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10628 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10629 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10631 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10632 _scsih_sas_enclosure_dev_status_change_event(ioc,
10635 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10636 _scsih_sas_ir_config_change_event(ioc, fw_event);
10638 case MPI2_EVENT_IR_VOLUME:
10639 _scsih_sas_ir_volume_event(ioc, fw_event);
10641 case MPI2_EVENT_IR_PHYSICAL_DISK:
10642 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10644 case MPI2_EVENT_IR_OPERATION_STATUS:
10645 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10647 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10648 _scsih_pcie_device_status_change_event(ioc, fw_event);
10650 case MPI2_EVENT_PCIE_ENUMERATION:
10651 _scsih_pcie_enumeration_event(ioc, fw_event);
10653 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10654 _scsih_pcie_topology_change_event(ioc, fw_event);
10655 ioc->current_event = NULL;
10660 fw_event_work_put(fw_event);
10661 ioc->current_event = NULL;
10665 * _firmware_event_work
10666 * @work: The fw_event_work object
10669 * wrappers for the work thread handling firmware events
10673 _firmware_event_work(struct work_struct *work)
10675 struct fw_event_work *fw_event = container_of(work,
10676 struct fw_event_work, work);
10678 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10682 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10683 * @ioc: per adapter object
10684 * @msix_index: MSIX table index supplied by the OS
10685 * @reply: reply message frame(lower 32bit addr)
10686 * Context: interrupt.
10688 * This function merely adds a new work task into ioc->firmware_event_thread.
10689 * The tasks are worked from _firmware_event_work in user context.
10691 * Return: 1 meaning mf should be freed from _base_interrupt
10692 * 0 means the mf is freed from this function.
10695 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10698 struct fw_event_work *fw_event;
10699 Mpi2EventNotificationReply_t *mpi_reply;
10702 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10704 /* events turned off due to host reset */
10705 if (ioc->pci_error_recovery)
10708 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10710 if (unlikely(!mpi_reply)) {
10711 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10712 __FILE__, __LINE__, __func__);
10716 event = le16_to_cpu(mpi_reply->Event);
10718 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10719 mpt3sas_trigger_event(ioc, event, 0);
10723 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10725 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10726 (Mpi2EventDataSasBroadcastPrimitive_t *)
10727 mpi_reply->EventData;
10729 if (baen_data->Primitive !=
10730 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10733 if (ioc->broadcast_aen_busy) {
10734 ioc->broadcast_aen_pending++;
10737 ioc->broadcast_aen_busy = 1;
10741 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10742 _scsih_check_topo_delete_events(ioc,
10743 (Mpi2EventDataSasTopologyChangeList_t *)
10744 mpi_reply->EventData);
10746 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10747 _scsih_check_pcie_topo_remove_events(ioc,
10748 (Mpi26EventDataPCIeTopologyChangeList_t *)
10749 mpi_reply->EventData);
10751 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10752 _scsih_check_ir_config_unhide_events(ioc,
10753 (Mpi2EventDataIrConfigChangeList_t *)
10754 mpi_reply->EventData);
10756 case MPI2_EVENT_IR_VOLUME:
10757 _scsih_check_volume_delete_events(ioc,
10758 (Mpi2EventDataIrVolume_t *)
10759 mpi_reply->EventData);
10761 case MPI2_EVENT_LOG_ENTRY_ADDED:
10763 Mpi2EventDataLogEntryAdded_t *log_entry;
10766 if (!ioc->is_warpdrive)
10769 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10770 mpi_reply->EventData;
10771 log_code = (u32 *)log_entry->LogData;
10773 if (le16_to_cpu(log_entry->LogEntryQualifier)
10774 != MPT2_WARPDRIVE_LOGENTRY)
10777 switch (le32_to_cpu(*log_code)) {
10778 case MPT2_WARPDRIVE_LC_SSDT:
10779 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10781 case MPT2_WARPDRIVE_LC_SSDLW:
10782 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10784 case MPT2_WARPDRIVE_LC_SSDLF:
10785 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10787 case MPT2_WARPDRIVE_LC_BRMF:
10788 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10794 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10795 _scsih_sas_device_status_change_event(ioc,
10796 (Mpi2EventDataSasDeviceStatusChange_t *)
10797 mpi_reply->EventData);
10799 case MPI2_EVENT_IR_OPERATION_STATUS:
10800 case MPI2_EVENT_SAS_DISCOVERY:
10801 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10802 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10803 case MPI2_EVENT_IR_PHYSICAL_DISK:
10804 case MPI2_EVENT_PCIE_ENUMERATION:
10805 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10808 case MPI2_EVENT_TEMP_THRESHOLD:
10809 _scsih_temp_threshold_events(ioc,
10810 (Mpi2EventDataTemperature_t *)
10811 mpi_reply->EventData);
10813 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10814 ActiveCableEventData =
10815 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10816 switch (ActiveCableEventData->ReasonCode) {
10817 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10818 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10819 ActiveCableEventData->ReceptacleID);
10820 pr_notice("cannot be powered and devices connected\n");
10821 pr_notice("to this active cable will not be seen\n");
10822 pr_notice("This active cable requires %d mW of power\n",
10824 ActiveCableEventData->ActiveCablePowerRequirement));
10827 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10828 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10829 ActiveCableEventData->ReceptacleID);
10831 "is not running at optimal speed(12 Gb/s rate)\n");
10837 default: /* ignore the rest */
10841 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
10842 fw_event = alloc_fw_event_work(sz);
10844 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10845 __FILE__, __LINE__, __func__);
10849 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
10850 fw_event->ioc = ioc;
10851 fw_event->VF_ID = mpi_reply->VF_ID;
10852 fw_event->VP_ID = mpi_reply->VP_ID;
10853 fw_event->event = event;
10854 _scsih_fw_event_add(ioc, fw_event);
10855 fw_event_work_put(fw_event);
10860 * _scsih_expander_node_remove - removing expander device from list.
10861 * @ioc: per adapter object
10862 * @sas_expander: the sas_device object
10864 * Removing object and freeing associated memory from the
10865 * ioc->sas_expander_list.
10868 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
10869 struct _sas_node *sas_expander)
10871 struct _sas_port *mpt3sas_port, *next;
10872 unsigned long flags;
10874 /* remove sibling ports attached to this expander */
10875 list_for_each_entry_safe(mpt3sas_port, next,
10876 &sas_expander->sas_port_list, port_list) {
10877 if (ioc->shost_recovery)
10879 if (mpt3sas_port->remote_identify.device_type ==
10881 mpt3sas_device_remove_by_sas_address(ioc,
10882 mpt3sas_port->remote_identify.sas_address,
10883 mpt3sas_port->hba_port);
10884 else if (mpt3sas_port->remote_identify.device_type ==
10885 SAS_EDGE_EXPANDER_DEVICE ||
10886 mpt3sas_port->remote_identify.device_type ==
10887 SAS_FANOUT_EXPANDER_DEVICE)
10888 mpt3sas_expander_remove(ioc,
10889 mpt3sas_port->remote_identify.sas_address,
10890 mpt3sas_port->hba_port);
10893 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
10894 sas_expander->sas_address_parent, sas_expander->port);
10897 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10898 sas_expander->handle, (unsigned long long)
10899 sas_expander->sas_address,
10900 sas_expander->port->port_id);
10902 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10903 list_del(&sas_expander->list);
10904 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10906 kfree(sas_expander->phy);
10907 kfree(sas_expander);
10911 * _scsih_nvme_shutdown - NVMe shutdown notification
10912 * @ioc: per adapter object
10914 * Sending IoUnitControl request with shutdown operation code to alert IOC that
10915 * the host system is shutting down so that IOC can issue NVMe shutdown to
10916 * NVMe drives attached to it.
10919 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
10921 Mpi26IoUnitControlRequest_t *mpi_request;
10922 Mpi26IoUnitControlReply_t *mpi_reply;
10925 /* are there any NVMe devices ? */
10926 if (list_empty(&ioc->pcie_device_list))
10929 mutex_lock(&ioc->scsih_cmds.mutex);
10931 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10932 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10936 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10938 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10941 "%s: failed obtaining a smid\n", __func__);
10942 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10946 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10947 ioc->scsih_cmds.smid = smid;
10948 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
10949 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
10950 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
10952 init_completion(&ioc->scsih_cmds.done);
10953 ioc->put_smid_default(ioc, smid);
10954 /* Wait for max_shutdown_latency seconds */
10956 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
10957 ioc->max_shutdown_latency);
10958 wait_for_completion_timeout(&ioc->scsih_cmds.done,
10959 ioc->max_shutdown_latency*HZ);
10961 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
10962 ioc_err(ioc, "%s: timeout\n", __func__);
10966 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
10967 mpi_reply = ioc->scsih_cmds.reply;
10968 ioc_info(ioc, "Io Unit Control shutdown (complete):"
10969 "ioc_status(0x%04x), loginfo(0x%08x)\n",
10970 le16_to_cpu(mpi_reply->IOCStatus),
10971 le32_to_cpu(mpi_reply->IOCLogInfo));
10974 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10975 mutex_unlock(&ioc->scsih_cmds.mutex);
10980 * _scsih_ir_shutdown - IR shutdown notification
10981 * @ioc: per adapter object
10983 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
10984 * the host system is shutting down.
10987 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
10989 Mpi2RaidActionRequest_t *mpi_request;
10990 Mpi2RaidActionReply_t *mpi_reply;
10993 /* is IR firmware build loaded ? */
10994 if (!ioc->ir_firmware)
10997 /* are there any volumes ? */
10998 if (list_empty(&ioc->raid_device_list))
11001 mutex_lock(&ioc->scsih_cmds.mutex);
11003 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11004 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11007 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11009 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11011 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
11012 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11016 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11017 ioc->scsih_cmds.smid = smid;
11018 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11020 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11021 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
11023 if (!ioc->hide_ir_msg)
11024 ioc_info(ioc, "IR shutdown (sending)\n");
11025 init_completion(&ioc->scsih_cmds.done);
11026 ioc->put_smid_default(ioc, smid);
11027 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11029 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11030 ioc_err(ioc, "%s: timeout\n", __func__);
11034 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11035 mpi_reply = ioc->scsih_cmds.reply;
11036 if (!ioc->hide_ir_msg)
11037 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11038 le16_to_cpu(mpi_reply->IOCStatus),
11039 le32_to_cpu(mpi_reply->IOCLogInfo));
11043 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11044 mutex_unlock(&ioc->scsih_cmds.mutex);
11048 * _scsih_get_shost_and_ioc - get shost and ioc
11049 * and verify whether they are NULL or not
11050 * @pdev: PCI device struct
11051 * @shost: address of scsi host pointer
11052 * @ioc: address of HBA adapter pointer
11054 * Return zero if *shost and *ioc are not NULL otherwise return error number.
11057 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11058 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11060 *shost = pci_get_drvdata(pdev);
11061 if (*shost == NULL) {
11062 dev_err(&pdev->dev, "pdev's driver data is null\n");
11066 *ioc = shost_priv(*shost);
11067 if (*ioc == NULL) {
11068 dev_err(&pdev->dev, "shost's private data is null\n");
11076 * scsih_remove - detach and remove add host
11077 * @pdev: PCI device struct
11079 * Routine called when unloading the driver.
11081 static void scsih_remove(struct pci_dev *pdev)
11083 struct Scsi_Host *shost;
11084 struct MPT3SAS_ADAPTER *ioc;
11085 struct _sas_port *mpt3sas_port, *next_port;
11086 struct _raid_device *raid_device, *next;
11087 struct MPT3SAS_TARGET *sas_target_priv_data;
11088 struct _pcie_device *pcie_device, *pcienext;
11089 struct workqueue_struct *wq;
11090 unsigned long flags;
11091 Mpi2ConfigReply_t mpi_reply;
11092 struct hba_port *port, *port_next;
11094 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11097 ioc->remove_host = 1;
11099 if (!pci_device_is_present(pdev))
11100 _scsih_flush_running_cmds(ioc);
11102 _scsih_fw_event_cleanup_queue(ioc);
11104 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11105 wq = ioc->firmware_event_thread;
11106 ioc->firmware_event_thread = NULL;
11107 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11109 destroy_workqueue(wq);
11111 * Copy back the unmodified ioc page1. so that on next driver load,
11112 * current modified changes on ioc page1 won't take effect.
11114 if (ioc->is_aero_ioc)
11115 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11116 &ioc->ioc_pg1_copy);
11117 /* release all the volumes */
11118 _scsih_ir_shutdown(ioc);
11119 mpt3sas_destroy_debugfs(ioc);
11120 sas_remove_host(shost);
11121 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11123 if (raid_device->starget) {
11124 sas_target_priv_data =
11125 raid_device->starget->hostdata;
11126 sas_target_priv_data->deleted = 1;
11127 scsi_remove_target(&raid_device->starget->dev);
11129 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11130 raid_device->handle, (u64)raid_device->wwid);
11131 _scsih_raid_device_remove(ioc, raid_device);
11133 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11135 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11136 list_del_init(&pcie_device->list);
11137 pcie_device_put(pcie_device);
11140 /* free ports attached to the sas_host */
11141 list_for_each_entry_safe(mpt3sas_port, next_port,
11142 &ioc->sas_hba.sas_port_list, port_list) {
11143 if (mpt3sas_port->remote_identify.device_type ==
11145 mpt3sas_device_remove_by_sas_address(ioc,
11146 mpt3sas_port->remote_identify.sas_address,
11147 mpt3sas_port->hba_port);
11148 else if (mpt3sas_port->remote_identify.device_type ==
11149 SAS_EDGE_EXPANDER_DEVICE ||
11150 mpt3sas_port->remote_identify.device_type ==
11151 SAS_FANOUT_EXPANDER_DEVICE)
11152 mpt3sas_expander_remove(ioc,
11153 mpt3sas_port->remote_identify.sas_address,
11154 mpt3sas_port->hba_port);
11157 list_for_each_entry_safe(port, port_next,
11158 &ioc->port_table_list, list) {
11159 list_del(&port->list);
11163 /* free phys attached to the sas_host */
11164 if (ioc->sas_hba.num_phys) {
11165 kfree(ioc->sas_hba.phy);
11166 ioc->sas_hba.phy = NULL;
11167 ioc->sas_hba.num_phys = 0;
11170 mpt3sas_base_detach(ioc);
11171 spin_lock(&gioc_lock);
11172 list_del(&ioc->list);
11173 spin_unlock(&gioc_lock);
11174 scsi_host_put(shost);
11178 * scsih_shutdown - routine call during system shutdown
11179 * @pdev: PCI device struct
11182 scsih_shutdown(struct pci_dev *pdev)
11184 struct Scsi_Host *shost;
11185 struct MPT3SAS_ADAPTER *ioc;
11186 struct workqueue_struct *wq;
11187 unsigned long flags;
11188 Mpi2ConfigReply_t mpi_reply;
11190 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11193 ioc->remove_host = 1;
11195 if (!pci_device_is_present(pdev))
11196 _scsih_flush_running_cmds(ioc);
11198 _scsih_fw_event_cleanup_queue(ioc);
11200 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11201 wq = ioc->firmware_event_thread;
11202 ioc->firmware_event_thread = NULL;
11203 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11205 destroy_workqueue(wq);
11207 * Copy back the unmodified ioc page1 so that on next driver load,
11208 * current modified changes on ioc page1 won't take effect.
11210 if (ioc->is_aero_ioc)
11211 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11212 &ioc->ioc_pg1_copy);
11214 _scsih_ir_shutdown(ioc);
11215 _scsih_nvme_shutdown(ioc);
11216 mpt3sas_base_detach(ioc);
11221 * _scsih_probe_boot_devices - reports 1st device
11222 * @ioc: per adapter object
11224 * If specified in bios page 2, this routine reports the 1st
11225 * device scsi-ml or sas transport for persistent boot device
11226 * purposes. Please refer to function _scsih_determine_boot_device()
11229 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11233 struct _sas_device *sas_device;
11234 struct _raid_device *raid_device;
11235 struct _pcie_device *pcie_device;
11237 u64 sas_address_parent;
11239 unsigned long flags;
11242 struct hba_port *port;
11244 /* no Bios, return immediately */
11245 if (!ioc->bios_pg3.BiosVersion)
11249 if (ioc->req_boot_device.device) {
11250 device = ioc->req_boot_device.device;
11251 channel = ioc->req_boot_device.channel;
11252 } else if (ioc->req_alt_boot_device.device) {
11253 device = ioc->req_alt_boot_device.device;
11254 channel = ioc->req_alt_boot_device.channel;
11255 } else if (ioc->current_boot_device.device) {
11256 device = ioc->current_boot_device.device;
11257 channel = ioc->current_boot_device.channel;
11263 if (channel == RAID_CHANNEL) {
11264 raid_device = device;
11265 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11266 raid_device->id, 0);
11268 _scsih_raid_device_remove(ioc, raid_device);
11269 } else if (channel == PCIE_CHANNEL) {
11270 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11271 pcie_device = device;
11272 tid = pcie_device->id;
11273 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11274 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11275 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11277 _scsih_pcie_device_remove(ioc, pcie_device);
11279 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11280 sas_device = device;
11281 handle = sas_device->handle;
11282 sas_address_parent = sas_device->sas_address_parent;
11283 sas_address = sas_device->sas_address;
11284 port = sas_device->port;
11285 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11286 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11288 if (ioc->hide_drives)
11294 if (!mpt3sas_transport_port_add(ioc, handle,
11295 sas_address_parent, port)) {
11296 _scsih_sas_device_remove(ioc, sas_device);
11297 } else if (!sas_device->starget) {
11298 if (!ioc->is_driver_loading) {
11299 mpt3sas_transport_port_remove(ioc,
11301 sas_address_parent, port);
11302 _scsih_sas_device_remove(ioc, sas_device);
11309 * _scsih_probe_raid - reporting raid volumes to scsi-ml
11310 * @ioc: per adapter object
11312 * Called during initial loading of the driver.
11315 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11317 struct _raid_device *raid_device, *raid_next;
11320 list_for_each_entry_safe(raid_device, raid_next,
11321 &ioc->raid_device_list, list) {
11322 if (raid_device->starget)
11324 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11325 raid_device->id, 0);
11327 _scsih_raid_device_remove(ioc, raid_device);
11331 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11333 struct _sas_device *sas_device = NULL;
11334 unsigned long flags;
11336 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11337 if (!list_empty(&ioc->sas_device_init_list)) {
11338 sas_device = list_first_entry(&ioc->sas_device_init_list,
11339 struct _sas_device, list);
11340 sas_device_get(sas_device);
11342 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11347 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11348 struct _sas_device *sas_device)
11350 unsigned long flags;
11352 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11355 * Since we dropped the lock during the call to port_add(), we need to
11356 * be careful here that somebody else didn't move or delete this item
11357 * while we were busy with other things.
11359 * If it was on the list, we need a put() for the reference the list
11360 * had. Either way, we need a get() for the destination list.
11362 if (!list_empty(&sas_device->list)) {
11363 list_del_init(&sas_device->list);
11364 sas_device_put(sas_device);
11367 sas_device_get(sas_device);
11368 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11370 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11374 * _scsih_probe_sas - reporting sas devices to sas transport
11375 * @ioc: per adapter object
11377 * Called during initial loading of the driver.
11380 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11382 struct _sas_device *sas_device;
11384 if (ioc->hide_drives)
11387 while ((sas_device = get_next_sas_device(ioc))) {
11388 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11389 sas_device->sas_address_parent, sas_device->port)) {
11390 _scsih_sas_device_remove(ioc, sas_device);
11391 sas_device_put(sas_device);
11393 } else if (!sas_device->starget) {
11395 * When asyn scanning is enabled, its not possible to
11396 * remove devices while scanning is turned on due to an
11397 * oops in scsi_sysfs_add_sdev()->add_device()->
11398 * sysfs_addrm_start()
11400 if (!ioc->is_driver_loading) {
11401 mpt3sas_transport_port_remove(ioc,
11402 sas_device->sas_address,
11403 sas_device->sas_address_parent,
11405 _scsih_sas_device_remove(ioc, sas_device);
11406 sas_device_put(sas_device);
11410 sas_device_make_active(ioc, sas_device);
11411 sas_device_put(sas_device);
11416 * get_next_pcie_device - Get the next pcie device
11417 * @ioc: per adapter object
11419 * Get the next pcie device from pcie_device_init_list list.
11421 * Return: pcie device structure if pcie_device_init_list list is not empty
11422 * otherwise returns NULL
11424 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11426 struct _pcie_device *pcie_device = NULL;
11427 unsigned long flags;
11429 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11430 if (!list_empty(&ioc->pcie_device_init_list)) {
11431 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11432 struct _pcie_device, list);
11433 pcie_device_get(pcie_device);
11435 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11437 return pcie_device;
11441 * pcie_device_make_active - Add pcie device to pcie_device_list list
11442 * @ioc: per adapter object
11443 * @pcie_device: pcie device object
11445 * Add the pcie device which has registered with SCSI Transport Later to
11446 * pcie_device_list list
11448 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11449 struct _pcie_device *pcie_device)
11451 unsigned long flags;
11453 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11455 if (!list_empty(&pcie_device->list)) {
11456 list_del_init(&pcie_device->list);
11457 pcie_device_put(pcie_device);
11459 pcie_device_get(pcie_device);
11460 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11462 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11466 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11467 * @ioc: per adapter object
11469 * Called during initial loading of the driver.
11472 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11474 struct _pcie_device *pcie_device;
11477 /* PCIe Device List */
11478 while ((pcie_device = get_next_pcie_device(ioc))) {
11479 if (pcie_device->starget) {
11480 pcie_device_put(pcie_device);
11483 if (pcie_device->access_status ==
11484 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11485 pcie_device_make_active(ioc, pcie_device);
11486 pcie_device_put(pcie_device);
11489 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11490 pcie_device->id, 0);
11492 _scsih_pcie_device_remove(ioc, pcie_device);
11493 pcie_device_put(pcie_device);
11495 } else if (!pcie_device->starget) {
11497 * When async scanning is enabled, its not possible to
11498 * remove devices while scanning is turned on due to an
11499 * oops in scsi_sysfs_add_sdev()->add_device()->
11500 * sysfs_addrm_start()
11502 if (!ioc->is_driver_loading) {
11503 /* TODO-- Need to find out whether this condition will
11506 _scsih_pcie_device_remove(ioc, pcie_device);
11507 pcie_device_put(pcie_device);
11511 pcie_device_make_active(ioc, pcie_device);
11512 pcie_device_put(pcie_device);
11517 * _scsih_probe_devices - probing for devices
11518 * @ioc: per adapter object
11520 * Called during initial loading of the driver.
11523 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11525 u16 volume_mapping_flags;
11527 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11528 return; /* return when IOC doesn't support initiator mode */
11530 _scsih_probe_boot_devices(ioc);
11532 if (ioc->ir_firmware) {
11533 volume_mapping_flags =
11534 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11535 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11536 if (volume_mapping_flags ==
11537 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11538 _scsih_probe_raid(ioc);
11539 _scsih_probe_sas(ioc);
11541 _scsih_probe_sas(ioc);
11542 _scsih_probe_raid(ioc);
11545 _scsih_probe_sas(ioc);
11546 _scsih_probe_pcie(ioc);
11551 * scsih_scan_start - scsi lld callback for .scan_start
11552 * @shost: SCSI host pointer
11554 * The shost has the ability to discover targets on its own instead
11555 * of scanning the entire bus. In our implemention, we will kick off
11556 * firmware discovery.
11559 scsih_scan_start(struct Scsi_Host *shost)
11561 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11563 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11564 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11565 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11566 mpt3sas_enable_diag_buffer(ioc, 1);
11568 if (disable_discovery > 0)
11571 ioc->start_scan = 1;
11572 rc = mpt3sas_port_enable(ioc);
11575 ioc_info(ioc, "port enable: FAILED\n");
11579 * scsih_scan_finished - scsi lld callback for .scan_finished
11580 * @shost: SCSI host pointer
11581 * @time: elapsed time of the scan in jiffies
11583 * This function will be called periodicallyn until it returns 1 with the
11584 * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
11585 * we wait for firmware discovery to complete, then return 1.
11588 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11590 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11592 if (disable_discovery > 0) {
11593 ioc->is_driver_loading = 0;
11594 ioc->wait_for_discovery_to_complete = 0;
11598 if (time >= (300 * HZ)) {
11599 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11600 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11601 ioc->is_driver_loading = 0;
11605 if (ioc->start_scan)
11608 if (ioc->start_scan_failed) {
11609 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11610 ioc->start_scan_failed);
11611 ioc->is_driver_loading = 0;
11612 ioc->wait_for_discovery_to_complete = 0;
11613 ioc->remove_host = 1;
11617 ioc_info(ioc, "port enable: SUCCESS\n");
11618 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11620 if (ioc->wait_for_discovery_to_complete) {
11621 ioc->wait_for_discovery_to_complete = 0;
11622 _scsih_probe_devices(ioc);
11624 mpt3sas_base_start_watchdog(ioc);
11625 ioc->is_driver_loading = 0;
11630 * scsih_map_queues - map reply queues with request queues
11631 * @shost: SCSI host pointer
11633 static int scsih_map_queues(struct Scsi_Host *shost)
11635 struct MPT3SAS_ADAPTER *ioc =
11636 (struct MPT3SAS_ADAPTER *)shost->hostdata;
11638 if (ioc->shost->nr_hw_queues == 1)
11641 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
11642 ioc->pdev, ioc->high_iops_queues);
11645 /* shost template for SAS 2.0 HBA devices */
11646 static struct scsi_host_template mpt2sas_driver_template = {
11647 .module = THIS_MODULE,
11648 .name = "Fusion MPT SAS Host",
11649 .proc_name = MPT2SAS_DRIVER_NAME,
11650 .queuecommand = scsih_qcmd,
11651 .target_alloc = scsih_target_alloc,
11652 .slave_alloc = scsih_slave_alloc,
11653 .slave_configure = scsih_slave_configure,
11654 .target_destroy = scsih_target_destroy,
11655 .slave_destroy = scsih_slave_destroy,
11656 .scan_finished = scsih_scan_finished,
11657 .scan_start = scsih_scan_start,
11658 .change_queue_depth = scsih_change_queue_depth,
11659 .eh_abort_handler = scsih_abort,
11660 .eh_device_reset_handler = scsih_dev_reset,
11661 .eh_target_reset_handler = scsih_target_reset,
11662 .eh_host_reset_handler = scsih_host_reset,
11663 .bios_param = scsih_bios_param,
11666 .sg_tablesize = MPT2SAS_SG_DEPTH,
11667 .max_sectors = 32767,
11669 .shost_attrs = mpt3sas_host_attrs,
11670 .sdev_attrs = mpt3sas_dev_attrs,
11671 .track_queue_depth = 1,
11672 .cmd_size = sizeof(struct scsiio_tracker),
11675 /* raid transport support for SAS 2.0 HBA devices */
11676 static struct raid_function_template mpt2sas_raid_functions = {
11677 .cookie = &mpt2sas_driver_template,
11678 .is_raid = scsih_is_raid,
11679 .get_resync = scsih_get_resync,
11680 .get_state = scsih_get_state,
11683 /* shost template for SAS 3.0 HBA devices */
11684 static struct scsi_host_template mpt3sas_driver_template = {
11685 .module = THIS_MODULE,
11686 .name = "Fusion MPT SAS Host",
11687 .proc_name = MPT3SAS_DRIVER_NAME,
11688 .queuecommand = scsih_qcmd,
11689 .target_alloc = scsih_target_alloc,
11690 .slave_alloc = scsih_slave_alloc,
11691 .slave_configure = scsih_slave_configure,
11692 .target_destroy = scsih_target_destroy,
11693 .slave_destroy = scsih_slave_destroy,
11694 .scan_finished = scsih_scan_finished,
11695 .scan_start = scsih_scan_start,
11696 .change_queue_depth = scsih_change_queue_depth,
11697 .eh_abort_handler = scsih_abort,
11698 .eh_device_reset_handler = scsih_dev_reset,
11699 .eh_target_reset_handler = scsih_target_reset,
11700 .eh_host_reset_handler = scsih_host_reset,
11701 .bios_param = scsih_bios_param,
11704 .sg_tablesize = MPT3SAS_SG_DEPTH,
11705 .max_sectors = 32767,
11706 .max_segment_size = 0xffffffff,
11708 .shost_attrs = mpt3sas_host_attrs,
11709 .sdev_attrs = mpt3sas_dev_attrs,
11710 .track_queue_depth = 1,
11711 .cmd_size = sizeof(struct scsiio_tracker),
11712 .map_queues = scsih_map_queues,
11715 /* raid transport support for SAS 3.0 HBA devices */
11716 static struct raid_function_template mpt3sas_raid_functions = {
11717 .cookie = &mpt3sas_driver_template,
11718 .is_raid = scsih_is_raid,
11719 .get_resync = scsih_get_resync,
11720 .get_state = scsih_get_state,
11724 * _scsih_determine_hba_mpi_version - determine in which MPI version class
11725 * this device belongs to.
11726 * @pdev: PCI device struct
11728 * return MPI2_VERSION for SAS 2.0 HBA devices,
11729 * MPI25_VERSION for SAS 3.0 HBA devices, and
11730 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
11733 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
11736 switch (pdev->device) {
11737 case MPI2_MFGPAGE_DEVID_SSS6200:
11738 case MPI2_MFGPAGE_DEVID_SAS2004:
11739 case MPI2_MFGPAGE_DEVID_SAS2008:
11740 case MPI2_MFGPAGE_DEVID_SAS2108_1:
11741 case MPI2_MFGPAGE_DEVID_SAS2108_2:
11742 case MPI2_MFGPAGE_DEVID_SAS2108_3:
11743 case MPI2_MFGPAGE_DEVID_SAS2116_1:
11744 case MPI2_MFGPAGE_DEVID_SAS2116_2:
11745 case MPI2_MFGPAGE_DEVID_SAS2208_1:
11746 case MPI2_MFGPAGE_DEVID_SAS2208_2:
11747 case MPI2_MFGPAGE_DEVID_SAS2208_3:
11748 case MPI2_MFGPAGE_DEVID_SAS2208_4:
11749 case MPI2_MFGPAGE_DEVID_SAS2208_5:
11750 case MPI2_MFGPAGE_DEVID_SAS2208_6:
11751 case MPI2_MFGPAGE_DEVID_SAS2308_1:
11752 case MPI2_MFGPAGE_DEVID_SAS2308_2:
11753 case MPI2_MFGPAGE_DEVID_SAS2308_3:
11754 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11755 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11756 return MPI2_VERSION;
11757 case MPI25_MFGPAGE_DEVID_SAS3004:
11758 case MPI25_MFGPAGE_DEVID_SAS3008:
11759 case MPI25_MFGPAGE_DEVID_SAS3108_1:
11760 case MPI25_MFGPAGE_DEVID_SAS3108_2:
11761 case MPI25_MFGPAGE_DEVID_SAS3108_5:
11762 case MPI25_MFGPAGE_DEVID_SAS3108_6:
11763 return MPI25_VERSION;
11764 case MPI26_MFGPAGE_DEVID_SAS3216:
11765 case MPI26_MFGPAGE_DEVID_SAS3224:
11766 case MPI26_MFGPAGE_DEVID_SAS3316_1:
11767 case MPI26_MFGPAGE_DEVID_SAS3316_2:
11768 case MPI26_MFGPAGE_DEVID_SAS3316_3:
11769 case MPI26_MFGPAGE_DEVID_SAS3316_4:
11770 case MPI26_MFGPAGE_DEVID_SAS3324_1:
11771 case MPI26_MFGPAGE_DEVID_SAS3324_2:
11772 case MPI26_MFGPAGE_DEVID_SAS3324_3:
11773 case MPI26_MFGPAGE_DEVID_SAS3324_4:
11774 case MPI26_MFGPAGE_DEVID_SAS3508:
11775 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11776 case MPI26_MFGPAGE_DEVID_SAS3408:
11777 case MPI26_MFGPAGE_DEVID_SAS3516:
11778 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11779 case MPI26_MFGPAGE_DEVID_SAS3416:
11780 case MPI26_MFGPAGE_DEVID_SAS3616:
11781 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11782 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11783 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11784 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11785 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11786 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11787 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11788 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11789 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11790 return MPI26_VERSION;
11796 * _scsih_probe - attach and add scsi host
11797 * @pdev: PCI device struct
11798 * @id: pci device id
11800 * Return: 0 success, anything else error.
11803 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
11805 struct MPT3SAS_ADAPTER *ioc;
11806 struct Scsi_Host *shost = NULL;
11808 u16 hba_mpi_version;
11810 /* Determine in which MPI version class this pci device belongs */
11811 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
11812 if (hba_mpi_version == 0)
11815 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
11816 * for other generation HBA's return with -ENODEV
11818 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
11821 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
11822 * for other generation HBA's return with -ENODEV
11824 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
11825 || hba_mpi_version == MPI26_VERSION)))
11828 switch (hba_mpi_version) {
11830 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
11831 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
11832 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
11833 shost = scsi_host_alloc(&mpt2sas_driver_template,
11834 sizeof(struct MPT3SAS_ADAPTER));
11837 ioc = shost_priv(shost);
11838 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11839 ioc->hba_mpi_version_belonged = hba_mpi_version;
11840 ioc->id = mpt2_ids++;
11841 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
11842 switch (pdev->device) {
11843 case MPI2_MFGPAGE_DEVID_SSS6200:
11844 ioc->is_warpdrive = 1;
11845 ioc->hide_ir_msg = 1;
11847 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11848 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11849 ioc->is_mcpu_endpoint = 1;
11852 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
11856 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11857 ioc->multipath_on_hba = 0;
11859 ioc->multipath_on_hba = 1;
11862 case MPI25_VERSION:
11863 case MPI26_VERSION:
11864 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
11865 shost = scsi_host_alloc(&mpt3sas_driver_template,
11866 sizeof(struct MPT3SAS_ADAPTER));
11869 ioc = shost_priv(shost);
11870 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11871 ioc->hba_mpi_version_belonged = hba_mpi_version;
11872 ioc->id = mpt3_ids++;
11873 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
11874 switch (pdev->device) {
11875 case MPI26_MFGPAGE_DEVID_SAS3508:
11876 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11877 case MPI26_MFGPAGE_DEVID_SAS3408:
11878 case MPI26_MFGPAGE_DEVID_SAS3516:
11879 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11880 case MPI26_MFGPAGE_DEVID_SAS3416:
11881 case MPI26_MFGPAGE_DEVID_SAS3616:
11882 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11883 ioc->is_gen35_ioc = 1;
11885 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11886 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11887 dev_err(&pdev->dev,
11888 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
11889 pdev->device, pdev->subsystem_vendor,
11890 pdev->subsystem_device);
11892 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11893 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11894 dev_err(&pdev->dev,
11895 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
11896 pdev->device, pdev->subsystem_vendor,
11897 pdev->subsystem_device);
11899 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11900 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11901 dev_info(&pdev->dev,
11902 "HBA is in Configurable Secure mode\n");
11904 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11905 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11906 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
11909 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
11911 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
11912 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
11913 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
11914 ioc->combined_reply_queue = 1;
11915 if (ioc->is_gen35_ioc)
11916 ioc->combined_reply_index_count =
11917 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
11919 ioc->combined_reply_index_count =
11920 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
11923 switch (ioc->is_gen35_ioc) {
11925 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11926 ioc->multipath_on_hba = 0;
11928 ioc->multipath_on_hba = 1;
11931 if (multipath_on_hba == -1 || multipath_on_hba > 0)
11932 ioc->multipath_on_hba = 1;
11934 ioc->multipath_on_hba = 0;
11944 INIT_LIST_HEAD(&ioc->list);
11945 spin_lock(&gioc_lock);
11946 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
11947 spin_unlock(&gioc_lock);
11948 ioc->shost = shost;
11950 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
11951 ioc->tm_cb_idx = tm_cb_idx;
11952 ioc->ctl_cb_idx = ctl_cb_idx;
11953 ioc->base_cb_idx = base_cb_idx;
11954 ioc->port_enable_cb_idx = port_enable_cb_idx;
11955 ioc->transport_cb_idx = transport_cb_idx;
11956 ioc->scsih_cb_idx = scsih_cb_idx;
11957 ioc->config_cb_idx = config_cb_idx;
11958 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
11959 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
11960 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
11961 ioc->logging_level = logging_level;
11962 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
11963 /* Host waits for minimum of six seconds */
11964 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
11966 * Enable MEMORY MOVE support flag.
11968 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
11969 /* Enable ADDITIONAL QUERY support flag. */
11970 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
11972 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
11974 /* misc semaphores and spin locks */
11975 mutex_init(&ioc->reset_in_progress_mutex);
11976 /* initializing pci_access_mutex lock */
11977 mutex_init(&ioc->pci_access_mutex);
11978 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
11979 spin_lock_init(&ioc->scsi_lookup_lock);
11980 spin_lock_init(&ioc->sas_device_lock);
11981 spin_lock_init(&ioc->sas_node_lock);
11982 spin_lock_init(&ioc->fw_event_lock);
11983 spin_lock_init(&ioc->raid_device_lock);
11984 spin_lock_init(&ioc->pcie_device_lock);
11985 spin_lock_init(&ioc->diag_trigger_lock);
11987 INIT_LIST_HEAD(&ioc->sas_device_list);
11988 INIT_LIST_HEAD(&ioc->sas_device_init_list);
11989 INIT_LIST_HEAD(&ioc->sas_expander_list);
11990 INIT_LIST_HEAD(&ioc->enclosure_list);
11991 INIT_LIST_HEAD(&ioc->pcie_device_list);
11992 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
11993 INIT_LIST_HEAD(&ioc->fw_event_list);
11994 INIT_LIST_HEAD(&ioc->raid_device_list);
11995 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
11996 INIT_LIST_HEAD(&ioc->delayed_tr_list);
11997 INIT_LIST_HEAD(&ioc->delayed_sc_list);
11998 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
11999 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12000 INIT_LIST_HEAD(&ioc->reply_queue_list);
12001 INIT_LIST_HEAD(&ioc->port_table_list);
12003 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12005 /* init shost parameters */
12006 shost->max_cmd_len = 32;
12007 shost->max_lun = max_lun;
12008 shost->transportt = mpt3sas_transport_template;
12009 shost->unique_id = ioc->id;
12011 if (ioc->is_mcpu_endpoint) {
12012 /* mCPU MPI support 64K max IO */
12013 shost->max_sectors = 128;
12014 ioc_info(ioc, "The max_sectors value is set to %d\n",
12015 shost->max_sectors);
12017 if (max_sectors != 0xFFFF) {
12018 if (max_sectors < 64) {
12019 shost->max_sectors = 64;
12020 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12022 } else if (max_sectors > 32767) {
12023 shost->max_sectors = 32767;
12024 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12027 shost->max_sectors = max_sectors & 0xFFFE;
12028 ioc_info(ioc, "The max_sectors value is set to %d\n",
12029 shost->max_sectors);
12033 /* register EEDP capabilities with SCSI layer */
12034 if (prot_mask >= 0)
12035 scsi_host_set_prot(shost, (prot_mask & 0x07));
12037 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12038 | SHOST_DIF_TYPE2_PROTECTION
12039 | SHOST_DIF_TYPE3_PROTECTION);
12041 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12044 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12045 "fw_event_%s%d", ioc->driver_name, ioc->id);
12046 ioc->firmware_event_thread = alloc_ordered_workqueue(
12047 ioc->firmware_event_name, 0);
12048 if (!ioc->firmware_event_thread) {
12049 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12050 __FILE__, __LINE__, __func__);
12052 goto out_thread_fail;
12055 ioc->is_driver_loading = 1;
12056 if ((mpt3sas_base_attach(ioc))) {
12057 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12058 __FILE__, __LINE__, __func__);
12060 goto out_attach_fail;
12063 if (ioc->is_warpdrive) {
12064 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12065 ioc->hide_drives = 0;
12066 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12067 ioc->hide_drives = 1;
12069 if (mpt3sas_get_num_volumes(ioc))
12070 ioc->hide_drives = 1;
12072 ioc->hide_drives = 0;
12075 ioc->hide_drives = 0;
12077 shost->host_tagset = 0;
12078 shost->nr_hw_queues = 1;
12080 if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
12081 host_tagset_enable && ioc->smp_affinity_enable) {
12083 shost->host_tagset = 1;
12084 shost->nr_hw_queues =
12085 ioc->reply_queue_count - ioc->high_iops_queues;
12087 dev_info(&ioc->pdev->dev,
12088 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12089 shost->can_queue, shost->nr_hw_queues);
12092 rv = scsi_add_host(shost, &pdev->dev);
12094 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12095 __FILE__, __LINE__, __func__);
12096 goto out_add_shost_fail;
12099 scsi_scan_host(shost);
12100 mpt3sas_setup_debugfs(ioc);
12102 out_add_shost_fail:
12103 mpt3sas_base_detach(ioc);
12105 destroy_workqueue(ioc->firmware_event_thread);
12107 spin_lock(&gioc_lock);
12108 list_del(&ioc->list);
12109 spin_unlock(&gioc_lock);
12110 scsi_host_put(shost);
12115 * scsih_suspend - power management suspend main entry point
12116 * @dev: Device struct
12118 * Return: 0 success, anything else error.
12120 static int __maybe_unused
12121 scsih_suspend(struct device *dev)
12123 struct pci_dev *pdev = to_pci_dev(dev);
12124 struct Scsi_Host *shost;
12125 struct MPT3SAS_ADAPTER *ioc;
12128 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12132 mpt3sas_base_stop_watchdog(ioc);
12133 flush_scheduled_work();
12134 scsi_block_requests(shost);
12135 _scsih_nvme_shutdown(ioc);
12136 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12137 pdev, pci_name(pdev));
12139 mpt3sas_base_free_resources(ioc);
12144 * scsih_resume - power management resume main entry point
12145 * @dev: Device struct
12147 * Return: 0 success, anything else error.
12149 static int __maybe_unused
12150 scsih_resume(struct device *dev)
12152 struct pci_dev *pdev = to_pci_dev(dev);
12153 struct Scsi_Host *shost;
12154 struct MPT3SAS_ADAPTER *ioc;
12155 pci_power_t device_state = pdev->current_state;
12158 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12162 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12163 pdev, pci_name(pdev), device_state);
12166 r = mpt3sas_base_map_resources(ioc);
12169 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12170 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12171 scsi_unblock_requests(shost);
12172 mpt3sas_base_start_watchdog(ioc);
12177 * scsih_pci_error_detected - Called when a PCI error is detected.
12178 * @pdev: PCI device struct
12179 * @state: PCI channel state
12181 * Description: Called when a PCI error is detected.
12183 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12185 static pci_ers_result_t
12186 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12188 struct Scsi_Host *shost;
12189 struct MPT3SAS_ADAPTER *ioc;
12191 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12192 return PCI_ERS_RESULT_DISCONNECT;
12194 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12197 case pci_channel_io_normal:
12198 return PCI_ERS_RESULT_CAN_RECOVER;
12199 case pci_channel_io_frozen:
12200 /* Fatal error, prepare for slot reset */
12201 ioc->pci_error_recovery = 1;
12202 scsi_block_requests(ioc->shost);
12203 mpt3sas_base_stop_watchdog(ioc);
12204 mpt3sas_base_free_resources(ioc);
12205 return PCI_ERS_RESULT_NEED_RESET;
12206 case pci_channel_io_perm_failure:
12207 /* Permanent error, prepare for device removal */
12208 ioc->pci_error_recovery = 1;
12209 mpt3sas_base_stop_watchdog(ioc);
12210 _scsih_flush_running_cmds(ioc);
12211 return PCI_ERS_RESULT_DISCONNECT;
12213 return PCI_ERS_RESULT_NEED_RESET;
12217 * scsih_pci_slot_reset - Called when PCI slot has been reset.
12218 * @pdev: PCI device struct
12220 * Description: This routine is called by the pci error recovery
12221 * code after the PCI slot has been reset, just before we
12222 * should resume normal operations.
12224 static pci_ers_result_t
12225 scsih_pci_slot_reset(struct pci_dev *pdev)
12227 struct Scsi_Host *shost;
12228 struct MPT3SAS_ADAPTER *ioc;
12231 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12232 return PCI_ERS_RESULT_DISCONNECT;
12234 ioc_info(ioc, "PCI error: slot reset callback!!\n");
12236 ioc->pci_error_recovery = 0;
12238 pci_restore_state(pdev);
12239 rc = mpt3sas_base_map_resources(ioc);
12241 return PCI_ERS_RESULT_DISCONNECT;
12243 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12244 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12246 ioc_warn(ioc, "hard reset: %s\n",
12247 (rc == 0) ? "success" : "failed");
12250 return PCI_ERS_RESULT_RECOVERED;
12252 return PCI_ERS_RESULT_DISCONNECT;
12256 * scsih_pci_resume() - resume normal ops after PCI reset
12257 * @pdev: pointer to PCI device
12259 * Called when the error recovery driver tells us that its
12260 * OK to resume normal operation. Use completion to allow
12261 * halted scsi ops to resume.
12264 scsih_pci_resume(struct pci_dev *pdev)
12266 struct Scsi_Host *shost;
12267 struct MPT3SAS_ADAPTER *ioc;
12269 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12272 ioc_info(ioc, "PCI error: resume callback!!\n");
12274 mpt3sas_base_start_watchdog(ioc);
12275 scsi_unblock_requests(ioc->shost);
12279 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12280 * @pdev: pointer to PCI device
12282 static pci_ers_result_t
12283 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12285 struct Scsi_Host *shost;
12286 struct MPT3SAS_ADAPTER *ioc;
12288 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12289 return PCI_ERS_RESULT_DISCONNECT;
12291 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12293 /* TODO - dump whatever for debugging purposes */
12295 /* This called only if scsih_pci_error_detected returns
12296 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12297 * works, no need to reset slot.
12299 return PCI_ERS_RESULT_RECOVERED;
12303 * scsih_ncq_prio_supp - Check for NCQ command priority support
12304 * @sdev: scsi device struct
12306 * This is called when a user indicates they would like to enable
12307 * ncq command priorities. This works only on SATA devices.
12309 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12311 unsigned char *buf;
12312 bool ncq_prio_supp = false;
12314 if (!scsi_device_supports_vpd(sdev))
12315 return ncq_prio_supp;
12317 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12319 return ncq_prio_supp;
12321 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12322 ncq_prio_supp = (buf[213] >> 4) & 1;
12325 return ncq_prio_supp;
12328 * The pci device ids are defined in mpi/mpi2_cnfg.h.
12330 static const struct pci_device_id mpt3sas_pci_table[] = {
12331 /* Spitfire ~ 2004 */
12332 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12333 PCI_ANY_ID, PCI_ANY_ID },
12334 /* Falcon ~ 2008 */
12335 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12336 PCI_ANY_ID, PCI_ANY_ID },
12337 /* Liberator ~ 2108 */
12338 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12339 PCI_ANY_ID, PCI_ANY_ID },
12340 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12341 PCI_ANY_ID, PCI_ANY_ID },
12342 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12343 PCI_ANY_ID, PCI_ANY_ID },
12344 /* Meteor ~ 2116 */
12345 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12346 PCI_ANY_ID, PCI_ANY_ID },
12347 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12348 PCI_ANY_ID, PCI_ANY_ID },
12349 /* Thunderbolt ~ 2208 */
12350 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12351 PCI_ANY_ID, PCI_ANY_ID },
12352 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12353 PCI_ANY_ID, PCI_ANY_ID },
12354 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12355 PCI_ANY_ID, PCI_ANY_ID },
12356 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12357 PCI_ANY_ID, PCI_ANY_ID },
12358 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12359 PCI_ANY_ID, PCI_ANY_ID },
12360 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12361 PCI_ANY_ID, PCI_ANY_ID },
12362 /* Mustang ~ 2308 */
12363 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12364 PCI_ANY_ID, PCI_ANY_ID },
12365 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12366 PCI_ANY_ID, PCI_ANY_ID },
12367 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12368 PCI_ANY_ID, PCI_ANY_ID },
12369 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12370 PCI_ANY_ID, PCI_ANY_ID },
12371 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12372 PCI_ANY_ID, PCI_ANY_ID },
12374 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12375 PCI_ANY_ID, PCI_ANY_ID },
12376 /* Fury ~ 3004 and 3008 */
12377 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12378 PCI_ANY_ID, PCI_ANY_ID },
12379 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12380 PCI_ANY_ID, PCI_ANY_ID },
12381 /* Invader ~ 3108 */
12382 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12383 PCI_ANY_ID, PCI_ANY_ID },
12384 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12385 PCI_ANY_ID, PCI_ANY_ID },
12386 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12387 PCI_ANY_ID, PCI_ANY_ID },
12388 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12389 PCI_ANY_ID, PCI_ANY_ID },
12390 /* Cutlass ~ 3216 and 3224 */
12391 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12392 PCI_ANY_ID, PCI_ANY_ID },
12393 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12394 PCI_ANY_ID, PCI_ANY_ID },
12395 /* Intruder ~ 3316 and 3324 */
12396 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12397 PCI_ANY_ID, PCI_ANY_ID },
12398 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12399 PCI_ANY_ID, PCI_ANY_ID },
12400 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12401 PCI_ANY_ID, PCI_ANY_ID },
12402 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12403 PCI_ANY_ID, PCI_ANY_ID },
12404 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12405 PCI_ANY_ID, PCI_ANY_ID },
12406 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12407 PCI_ANY_ID, PCI_ANY_ID },
12408 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12409 PCI_ANY_ID, PCI_ANY_ID },
12410 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12411 PCI_ANY_ID, PCI_ANY_ID },
12412 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12413 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12414 PCI_ANY_ID, PCI_ANY_ID },
12415 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12416 PCI_ANY_ID, PCI_ANY_ID },
12417 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12418 PCI_ANY_ID, PCI_ANY_ID },
12419 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12420 PCI_ANY_ID, PCI_ANY_ID },
12421 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12422 PCI_ANY_ID, PCI_ANY_ID },
12423 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12424 PCI_ANY_ID, PCI_ANY_ID },
12425 /* Mercator ~ 3616*/
12426 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12427 PCI_ANY_ID, PCI_ANY_ID },
12429 /* Aero SI 0x00E1 Configurable Secure
12430 * 0x00E2 Hard Secure
12432 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12433 PCI_ANY_ID, PCI_ANY_ID },
12434 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12435 PCI_ANY_ID, PCI_ANY_ID },
12438 * Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered
12440 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12441 PCI_ANY_ID, PCI_ANY_ID },
12442 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12443 PCI_ANY_ID, PCI_ANY_ID },
12445 /* Atlas PCIe Switch Management Port */
12446 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12447 PCI_ANY_ID, PCI_ANY_ID },
12449 /* Sea SI 0x00E5 Configurable Secure
12450 * 0x00E6 Hard Secure
12452 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12453 PCI_ANY_ID, PCI_ANY_ID },
12454 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12455 PCI_ANY_ID, PCI_ANY_ID },
12458 * Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
12460 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12461 PCI_ANY_ID, PCI_ANY_ID },
12462 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12463 PCI_ANY_ID, PCI_ANY_ID },
12465 {0} /* Terminating entry */
12467 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12469 static struct pci_error_handlers _mpt3sas_err_handler = {
12470 .error_detected = scsih_pci_error_detected,
12471 .mmio_enabled = scsih_pci_mmio_enabled,
12472 .slot_reset = scsih_pci_slot_reset,
12473 .resume = scsih_pci_resume,
12476 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12478 static struct pci_driver mpt3sas_driver = {
12479 .name = MPT3SAS_DRIVER_NAME,
12480 .id_table = mpt3sas_pci_table,
12481 .probe = _scsih_probe,
12482 .remove = scsih_remove,
12483 .shutdown = scsih_shutdown,
12484 .err_handler = &_mpt3sas_err_handler,
12485 .driver.pm = &scsih_pm_ops,
12489 * scsih_init - main entry point for this driver.
12491 * Return: 0 success, anything else error.
12499 mpt3sas_base_initialize_callback_handler();
12501 /* queuecommand callback hander */
12502 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12504 /* task management callback handler */
12505 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12507 /* base internal commands callback handler */
12508 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12509 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12510 mpt3sas_port_enable_done);
12512 /* transport internal commands callback handler */
12513 transport_cb_idx = mpt3sas_base_register_callback_handler(
12514 mpt3sas_transport_done);
12516 /* scsih internal commands callback handler */
12517 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12519 /* configuration page API internal commands callback handler */
12520 config_cb_idx = mpt3sas_base_register_callback_handler(
12521 mpt3sas_config_done);
12523 /* ctl module callback handler */
12524 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12526 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12527 _scsih_tm_tr_complete);
12529 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12530 _scsih_tm_volume_tr_complete);
12532 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12533 _scsih_sas_control_complete);
12535 mpt3sas_init_debugfs();
12540 * scsih_exit - exit point for this driver (when it is a module).
12542 * Return: 0 success, anything else error.
12548 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12549 mpt3sas_base_release_callback_handler(tm_cb_idx);
12550 mpt3sas_base_release_callback_handler(base_cb_idx);
12551 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12552 mpt3sas_base_release_callback_handler(transport_cb_idx);
12553 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12554 mpt3sas_base_release_callback_handler(config_cb_idx);
12555 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12557 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12558 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12559 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12561 /* raid transport support */
12562 if (hbas_to_enumerate != 1)
12563 raid_class_release(mpt3sas_raid_template);
12564 if (hbas_to_enumerate != 2)
12565 raid_class_release(mpt2sas_raid_template);
12566 sas_release_transport(mpt3sas_transport_template);
12567 mpt3sas_exit_debugfs();
12571 * _mpt3sas_init - main entry point for this driver.
12573 * Return: 0 success, anything else error.
12576 _mpt3sas_init(void)
12580 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12581 MPT3SAS_DRIVER_VERSION);
12583 mpt3sas_transport_template =
12584 sas_attach_transport(&mpt3sas_transport_functions);
12585 if (!mpt3sas_transport_template)
12588 /* No need attach mpt3sas raid functions template
12589 * if hbas_to_enumarate value is one.
12591 if (hbas_to_enumerate != 1) {
12592 mpt3sas_raid_template =
12593 raid_class_attach(&mpt3sas_raid_functions);
12594 if (!mpt3sas_raid_template) {
12595 sas_release_transport(mpt3sas_transport_template);
12600 /* No need to attach mpt2sas raid functions template
12601 * if hbas_to_enumarate value is two
12603 if (hbas_to_enumerate != 2) {
12604 mpt2sas_raid_template =
12605 raid_class_attach(&mpt2sas_raid_functions);
12606 if (!mpt2sas_raid_template) {
12607 sas_release_transport(mpt3sas_transport_template);
12612 error = scsih_init();
12618 mpt3sas_ctl_init(hbas_to_enumerate);
12620 error = pci_register_driver(&mpt3sas_driver);
12628 * _mpt3sas_exit - exit point for this driver (when it is a module).
12632 _mpt3sas_exit(void)
12634 pr_info("mpt3sas version %s unloading\n",
12635 MPT3SAS_DRIVER_VERSION);
12637 mpt3sas_ctl_exit(hbas_to_enumerate);
12639 pci_unregister_driver(&mpt3sas_driver);
12644 module_init(_mpt3sas_init);
12645 module_exit(_mpt3sas_exit);