/*
 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <asm/unaligned.h>
59 #include "mpt3sas_base.h"
61 #define RAID_CHANNEL 1
63 #define PCIE_CHANNEL 2
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 struct _pcie_device *pcie_device);
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81 /* global parameters */
82 LIST_HEAD(mpt3sas_ioc_list);
83 /* global ioc lock for list operations */
84 DEFINE_SPINLOCK(gioc_lock);
86 MODULE_AUTHOR(MPT3SAS_AUTHOR);
87 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
90 MODULE_ALIAS("mpt2sas");
92 /* local parameters */
93 static u8 scsi_io_cb_idx = -1;
94 static u8 tm_cb_idx = -1;
95 static u8 ctl_cb_idx = -1;
96 static u8 base_cb_idx = -1;
97 static u8 port_enable_cb_idx = -1;
98 static u8 transport_cb_idx = -1;
99 static u8 scsih_cb_idx = -1;
100 static u8 config_cb_idx = -1;
104 static u8 tm_tr_cb_idx = -1 ;
105 static u8 tm_tr_volume_cb_idx = -1 ;
106 static u8 tm_sas_control_cb_idx = -1;
108 /* command line options */
109 static u32 logging_level;
110 MODULE_PARM_DESC(logging_level,
111 " bits for enabling additional logging info (default=0)");
114 static ushort max_sectors = 0xFFFF;
115 module_param(max_sectors, ushort, 0444);
116 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
119 static int missing_delay[2] = {-1, -1};
120 module_param_array(missing_delay, int, NULL, 0444);
121 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
123 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
124 #define MPT3SAS_MAX_LUN (16895)
125 static u64 max_lun = MPT3SAS_MAX_LUN;
126 module_param(max_lun, ullong, 0444);
127 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129 static ushort hbas_to_enumerate;
130 module_param(hbas_to_enumerate, ushort, 0444);
131 MODULE_PARM_DESC(hbas_to_enumerate,
132 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
133 1 - enumerates only SAS 2.0 generation HBAs\n \
134 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136 /* diag_buffer_enable is bitwise
138 * bit 1 set = SNAPSHOT
139 * bit 2 set = EXTENDED
141 * Either bit can be set, or both
143 static int diag_buffer_enable = -1;
144 module_param(diag_buffer_enable, int, 0444);
145 MODULE_PARM_DESC(diag_buffer_enable,
146 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
147 static int disable_discovery = -1;
148 module_param(disable_discovery, int, 0444);
149 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
152 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
153 static int prot_mask = -1;
154 module_param(prot_mask, int, 0444);
155 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157 static bool enable_sdev_max_qd;
158 module_param(enable_sdev_max_qd, bool, 0444);
159 MODULE_PARM_DESC(enable_sdev_max_qd,
160 "Enable sdev max qd as can_queue, def=disabled(0)");
162 static int multipath_on_hba = -1;
163 module_param(multipath_on_hba, int, 0);
164 MODULE_PARM_DESC(multipath_on_hba,
165 "Multipath support to add same target device\n\t\t"
166 "as many times as it is visible to HBA from various paths\n\t\t"
168 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
169 "\t SAS 3.5 HBA - This will be enabled)");
171 /* raid transport support */
172 static struct raid_template *mpt3sas_raid_template;
173 static struct raid_template *mpt2sas_raid_template;
177 * struct sense_info - common structure for obtaining sense keys
179 * @asc: additional sense code
180 * @ascq: additional sense code qualifier
188 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
189 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
190 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
191 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
192 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
194 * struct fw_event_work - firmware event struct
195 * @list: link list framework
196 * @work: work object (ioc->fault_reset_work_q)
197 * @ioc: per adapter object
198 * @device_handle: device handle
199 * @VF_ID: virtual function id
200 * @VP_ID: virtual port id
201 * @ignore: flag meaning this event has been marked to ignore
202 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
203 * @refcount: kref for this event
204 * @event_data: reply event data payload follows
206 * This object stored on ioc->fw_event_list.
208 struct fw_event_work {
209 struct list_head list;
210 struct work_struct work;
212 struct MPT3SAS_ADAPTER *ioc;
218 struct kref refcount;
219 char event_data[] __aligned(4);
222 static void fw_event_work_free(struct kref *r)
224 kfree(container_of(r, struct fw_event_work, refcount));
227 static void fw_event_work_get(struct fw_event_work *fw_work)
229 kref_get(&fw_work->refcount);
232 static void fw_event_work_put(struct fw_event_work *fw_work)
234 kref_put(&fw_work->refcount, fw_event_work_free);
237 static struct fw_event_work *alloc_fw_event_work(int len)
239 struct fw_event_work *fw_event;
241 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
245 kref_init(&fw_event->refcount);
250 * struct _scsi_io_transfer - scsi io transfer
251 * @handle: sas device handle (assigned by firmware)
252 * @is_raid: flag set for hidden raid components
253 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
254 * @data_length: data transfer length
255 * @data_dma: dma pointer to data
258 * @cdb_length: cdb length
260 * @timeout: timeout for this command
261 * @VF_ID: virtual function id
262 * @VP_ID: virtual port id
263 * @valid_reply: flag set for reply message
264 * @sense_length: sense length
265 * @ioc_status: ioc status
266 * @scsi_state: scsi state
267 * @scsi_status: scsi staus
268 * @log_info: log information
269 * @transfer_length: data length transfer when there is a reply message
271 * Used for sending internal scsi commands to devices within this module.
272 * Refer to _scsi_send_scsi_io().
274 struct _scsi_io_transfer {
277 enum dma_data_direction dir;
280 u8 sense[SCSI_SENSE_BUFFERSIZE];
288 /* the following bits are only valid when 'valid_reply = 1' */
298 * _scsih_set_debug_level - global setting of ioc->logging_level.
302 * Note: The logging levels are defined in mpt3sas_debug.h.
305 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
307 int ret = param_set_int(val, kp);
308 struct MPT3SAS_ADAPTER *ioc;
313 pr_info("setting logging_level(0x%08x)\n", logging_level);
314 spin_lock(&gioc_lock);
315 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
316 ioc->logging_level = logging_level;
317 spin_unlock(&gioc_lock);
320 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
321 &logging_level, 0644);
324 * _scsih_srch_boot_sas_address - search based on sas_address
325 * @sas_address: sas address
326 * @boot_device: boot device object from bios page 2
328 * Return: 1 when there's a match, 0 means no match.
331 _scsih_srch_boot_sas_address(u64 sas_address,
332 Mpi2BootDeviceSasWwid_t *boot_device)
334 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
338 * _scsih_srch_boot_device_name - search based on device name
339 * @device_name: device name specified in INDENTIFY fram
340 * @boot_device: boot device object from bios page 2
342 * Return: 1 when there's a match, 0 means no match.
345 _scsih_srch_boot_device_name(u64 device_name,
346 Mpi2BootDeviceDeviceName_t *boot_device)
348 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
352 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
353 * @enclosure_logical_id: enclosure logical id
354 * @slot_number: slot number
355 * @boot_device: boot device object from bios page 2
357 * Return: 1 when there's a match, 0 means no match.
360 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
361 Mpi2BootDeviceEnclosureSlot_t *boot_device)
363 return (enclosure_logical_id == le64_to_cpu(boot_device->
364 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
365 SlotNumber)) ? 1 : 0;
369 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
370 * port number from port list
371 * @ioc: per adapter object
372 * @port_id: port number
373 * @bypass_dirty_port_flag: when set look the matching hba port entry even
374 * if hba port entry is marked as dirty.
376 * Search for hba port entry corresponding to provided port number,
377 * if available return port object otherwise return NULL.
380 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
381 u8 port_id, u8 bypass_dirty_port_flag)
383 struct hba_port *port, *port_next;
386 * When multipath_on_hba is disabled then
387 * search the hba_port entry using default
390 if (!ioc->multipath_on_hba)
391 port_id = MULTIPATH_DISABLED_PORT_ID;
393 list_for_each_entry_safe(port, port_next,
394 &ioc->port_table_list, list) {
395 if (port->port_id != port_id)
397 if (bypass_dirty_port_flag)
399 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
405 * Allocate hba_port object for default port id (i.e. 255)
406 * when multipath_on_hba is disabled for the HBA.
407 * And add this object to port_table_list.
409 if (!ioc->multipath_on_hba) {
410 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
414 port->port_id = port_id;
416 "hba_port entry: %p, port: %d is added to hba_port list\n",
417 port, port->port_id);
418 list_add_tail(&port->list,
419 &ioc->port_table_list);
426 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
427 * @ioc: per adapter object
428 * @port: hba_port object
431 * Return virtual_phy object corresponding to phy number.
434 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
435 struct hba_port *port, u32 phy)
437 struct virtual_phy *vphy, *vphy_next;
439 if (!port->vphys_mask)
442 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
443 if (vphy->phy_mask & (1 << phy))
450 * _scsih_is_boot_device - search for matching boot device.
451 * @sas_address: sas address
452 * @device_name: device name specified in INDENTIFY fram
453 * @enclosure_logical_id: enclosure logical id
455 * @form: specifies boot device form
456 * @boot_device: boot device object from bios page 2
458 * Return: 1 when there's a match, 0 means no match.
461 _scsih_is_boot_device(u64 sas_address, u64 device_name,
462 u64 enclosure_logical_id, u16 slot, u8 form,
463 Mpi2BiosPage2BootDevice_t *boot_device)
468 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
471 rc = _scsih_srch_boot_sas_address(
472 sas_address, &boot_device->SasWwid);
474 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
475 if (!enclosure_logical_id)
477 rc = _scsih_srch_boot_encl_slot(
478 enclosure_logical_id,
479 slot, &boot_device->EnclosureSlot);
481 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
484 rc = _scsih_srch_boot_device_name(
485 device_name, &boot_device->DeviceName);
487 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
495 * _scsih_get_sas_address - set the sas_address for given device handle
497 * @handle: device handle
498 * @sas_address: sas address
500 * Return: 0 success, non-zero when failure
503 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
506 Mpi2SasDevicePage0_t sas_device_pg0;
507 Mpi2ConfigReply_t mpi_reply;
512 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
513 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
514 ioc_err(ioc, "failure at %s:%d/%s()!\n",
515 __FILE__, __LINE__, __func__);
519 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
520 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
521 /* For HBA, vSES doesn't return HBA SAS address. Instead return
522 * vSES's sas address.
524 if ((handle <= ioc->sas_hba.num_phys) &&
525 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
526 MPI2_SAS_DEVICE_INFO_SEP)))
527 *sas_address = ioc->sas_hba.sas_address;
529 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
533 /* we hit this because the given parent handle doesn't exist */
534 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
537 /* else error case */
538 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
539 handle, ioc_status, __FILE__, __LINE__, __func__);
544 * _scsih_determine_boot_device - determine boot device.
545 * @ioc: per adapter object
546 * @device: sas_device or pcie_device object
547 * @channel: SAS or PCIe channel
549 * Determines whether this device should be first reported device to
550 * to scsi-ml or sas transport, this purpose is for persistent boot device.
551 * There are primary, alternate, and current entries in bios page 2. The order
552 * priority is primary, alternate, then current. This routine saves
553 * the corresponding device object.
554 * The saved data to be used later in _scsih_probe_boot_devices().
557 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
560 struct _sas_device *sas_device;
561 struct _pcie_device *pcie_device;
562 struct _raid_device *raid_device;
565 u64 enclosure_logical_id;
568 /* only process this function when driver loads */
569 if (!ioc->is_driver_loading)
572 /* no Bios, return immediately */
573 if (!ioc->bios_pg3.BiosVersion)
576 if (channel == RAID_CHANNEL) {
577 raid_device = device;
578 sas_address = raid_device->wwid;
580 enclosure_logical_id = 0;
582 } else if (channel == PCIE_CHANNEL) {
583 pcie_device = device;
584 sas_address = pcie_device->wwid;
586 enclosure_logical_id = 0;
590 sas_address = sas_device->sas_address;
591 device_name = sas_device->device_name;
592 enclosure_logical_id = sas_device->enclosure_logical_id;
593 slot = sas_device->slot;
596 if (!ioc->req_boot_device.device) {
597 if (_scsih_is_boot_device(sas_address, device_name,
598 enclosure_logical_id, slot,
599 (ioc->bios_pg2.ReqBootDeviceForm &
600 MPI2_BIOSPAGE2_FORM_MASK),
601 &ioc->bios_pg2.RequestedBootDevice)) {
603 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
604 __func__, (u64)sas_address));
605 ioc->req_boot_device.device = device;
606 ioc->req_boot_device.channel = channel;
610 if (!ioc->req_alt_boot_device.device) {
611 if (_scsih_is_boot_device(sas_address, device_name,
612 enclosure_logical_id, slot,
613 (ioc->bios_pg2.ReqAltBootDeviceForm &
614 MPI2_BIOSPAGE2_FORM_MASK),
615 &ioc->bios_pg2.RequestedAltBootDevice)) {
617 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
618 __func__, (u64)sas_address));
619 ioc->req_alt_boot_device.device = device;
620 ioc->req_alt_boot_device.channel = channel;
624 if (!ioc->current_boot_device.device) {
625 if (_scsih_is_boot_device(sas_address, device_name,
626 enclosure_logical_id, slot,
627 (ioc->bios_pg2.CurrentBootDeviceForm &
628 MPI2_BIOSPAGE2_FORM_MASK),
629 &ioc->bios_pg2.CurrentBootDevice)) {
631 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
632 __func__, (u64)sas_address));
633 ioc->current_boot_device.device = device;
634 ioc->current_boot_device.channel = channel;
639 static struct _sas_device *
640 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
641 struct MPT3SAS_TARGET *tgt_priv)
643 struct _sas_device *ret;
645 assert_spin_locked(&ioc->sas_device_lock);
647 ret = tgt_priv->sas_dev;
654 static struct _sas_device *
655 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
656 struct MPT3SAS_TARGET *tgt_priv)
658 struct _sas_device *ret;
661 spin_lock_irqsave(&ioc->sas_device_lock, flags);
662 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
663 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
668 static struct _pcie_device *
669 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
670 struct MPT3SAS_TARGET *tgt_priv)
672 struct _pcie_device *ret;
674 assert_spin_locked(&ioc->pcie_device_lock);
676 ret = tgt_priv->pcie_dev;
678 pcie_device_get(ret);
684 * mpt3sas_get_pdev_from_target - pcie device search
685 * @ioc: per adapter object
686 * @tgt_priv: starget private object
688 * Context: This function will acquire ioc->pcie_device_lock and will release
689 * before returning the pcie_device object.
691 * This searches for pcie_device from target, then return pcie_device object.
693 static struct _pcie_device *
694 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
695 struct MPT3SAS_TARGET *tgt_priv)
697 struct _pcie_device *ret;
700 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
701 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
702 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
709 * __mpt3sas_get_sdev_by_rphy - sas device search
710 * @ioc: per adapter object
711 * @rphy: sas_rphy pointer
713 * Context: This function will acquire ioc->sas_device_lock and will release
714 * before returning the sas_device object.
716 * This searches for sas_device from rphy object
717 * then return sas_device object.
720 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
721 struct sas_rphy *rphy)
723 struct _sas_device *sas_device;
725 assert_spin_locked(&ioc->sas_device_lock);
727 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
728 if (sas_device->rphy != rphy)
730 sas_device_get(sas_device);
735 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
736 if (sas_device->rphy != rphy)
738 sas_device_get(sas_device);
746 * mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
747 * sas address from sas_device_list list
748 * @ioc: per adapter object
751 * Search for _sas_device object corresponding to provided sas address,
752 * if available return _sas_device object address otherwise return NULL.
755 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
756 u64 sas_address, struct hba_port *port)
758 struct _sas_device *sas_device;
763 assert_spin_locked(&ioc->sas_device_lock);
765 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
766 if (sas_device->sas_address != sas_address)
768 if (sas_device->port != port)
770 sas_device_get(sas_device);
774 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
775 if (sas_device->sas_address != sas_address)
777 if (sas_device->port != port)
779 sas_device_get(sas_device);
787 * mpt3sas_get_sdev_by_addr - sas device search
788 * @ioc: per adapter object
789 * @sas_address: sas address
790 * @port: hba port entry
791 * Context: Calling function should acquire ioc->sas_device_lock
793 * This searches for sas_device based on sas_address & port number,
794 * then return sas_device object.
797 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
798 u64 sas_address, struct hba_port *port)
800 struct _sas_device *sas_device;
803 spin_lock_irqsave(&ioc->sas_device_lock, flags);
804 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
806 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
811 static struct _sas_device *
812 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
814 struct _sas_device *sas_device;
816 assert_spin_locked(&ioc->sas_device_lock);
818 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
819 if (sas_device->handle == handle)
822 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
823 if (sas_device->handle == handle)
829 sas_device_get(sas_device);
834 * mpt3sas_get_sdev_by_handle - sas device search
835 * @ioc: per adapter object
836 * @handle: sas device handle (assigned by firmware)
837 * Context: Calling function should acquire ioc->sas_device_lock
839 * This searches for sas_device based on sas_address, then return sas_device
843 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
845 struct _sas_device *sas_device;
848 spin_lock_irqsave(&ioc->sas_device_lock, flags);
849 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
850 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
856 * _scsih_display_enclosure_chassis_info - display device location info
857 * @ioc: per adapter object
858 * @sas_device: per sas device object
859 * @sdev: scsi device struct
860 * @starget: scsi target struct
863 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
864 struct _sas_device *sas_device, struct scsi_device *sdev,
865 struct scsi_target *starget)
868 if (sas_device->enclosure_handle != 0)
869 sdev_printk(KERN_INFO, sdev,
870 "enclosure logical id (0x%016llx), slot(%d) \n",
872 sas_device->enclosure_logical_id,
874 if (sas_device->connector_name[0] != '\0')
875 sdev_printk(KERN_INFO, sdev,
876 "enclosure level(0x%04x), connector name( %s)\n",
877 sas_device->enclosure_level,
878 sas_device->connector_name);
879 if (sas_device->is_chassis_slot_valid)
880 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
881 sas_device->chassis_slot);
882 } else if (starget) {
883 if (sas_device->enclosure_handle != 0)
884 starget_printk(KERN_INFO, starget,
885 "enclosure logical id(0x%016llx), slot(%d) \n",
887 sas_device->enclosure_logical_id,
889 if (sas_device->connector_name[0] != '\0')
890 starget_printk(KERN_INFO, starget,
891 "enclosure level(0x%04x), connector name( %s)\n",
892 sas_device->enclosure_level,
893 sas_device->connector_name);
894 if (sas_device->is_chassis_slot_valid)
895 starget_printk(KERN_INFO, starget,
896 "chassis slot(0x%04x)\n",
897 sas_device->chassis_slot);
899 if (sas_device->enclosure_handle != 0)
900 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
901 (u64)sas_device->enclosure_logical_id,
903 if (sas_device->connector_name[0] != '\0')
904 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
905 sas_device->enclosure_level,
906 sas_device->connector_name);
907 if (sas_device->is_chassis_slot_valid)
908 ioc_info(ioc, "chassis slot(0x%04x)\n",
909 sas_device->chassis_slot);
914 * _scsih_sas_device_remove - remove sas_device from list.
915 * @ioc: per adapter object
916 * @sas_device: the sas_device object
917 * Context: This function will acquire ioc->sas_device_lock.
919 * If sas_device is on the list, remove it and decrement its reference count.
922 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
923 struct _sas_device *sas_device)
929 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
930 sas_device->handle, (u64)sas_device->sas_address);
932 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
935 * The lock serializes access to the list, but we still need to verify
936 * that nobody removed the entry while we were waiting on the lock.
938 spin_lock_irqsave(&ioc->sas_device_lock, flags);
939 if (!list_empty(&sas_device->list)) {
940 list_del_init(&sas_device->list);
941 sas_device_put(sas_device);
943 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
947 * _scsih_device_remove_by_handle - removing device object by handle
948 * @ioc: per adapter object
949 * @handle: device handle
952 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
954 struct _sas_device *sas_device;
957 if (ioc->shost_recovery)
960 spin_lock_irqsave(&ioc->sas_device_lock, flags);
961 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
963 list_del_init(&sas_device->list);
964 sas_device_put(sas_device);
966 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
968 _scsih_remove_device(ioc, sas_device);
969 sas_device_put(sas_device);
974 * mpt3sas_device_remove_by_sas_address - removing device object by
975 * sas address & port number
976 * @ioc: per adapter object
977 * @sas_address: device sas_address
978 * @port: hba port entry
983 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
984 u64 sas_address, struct hba_port *port)
986 struct _sas_device *sas_device;
989 if (ioc->shost_recovery)
992 spin_lock_irqsave(&ioc->sas_device_lock, flags);
993 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
995 list_del_init(&sas_device->list);
996 sas_device_put(sas_device);
998 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1000 _scsih_remove_device(ioc, sas_device);
1001 sas_device_put(sas_device);
1006 * _scsih_sas_device_add - insert sas_device to the list.
1007 * @ioc: per adapter object
1008 * @sas_device: the sas_device object
1009 * Context: This function will acquire ioc->sas_device_lock.
1011 * Adding new object to the ioc->sas_device_list.
1014 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1015 struct _sas_device *sas_device)
1017 unsigned long flags;
1020 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1021 __func__, sas_device->handle,
1022 (u64)sas_device->sas_address));
1024 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1027 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1028 sas_device_get(sas_device);
1029 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1030 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1032 if (ioc->hide_drives) {
1033 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1037 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1038 sas_device->sas_address_parent, sas_device->port)) {
1039 _scsih_sas_device_remove(ioc, sas_device);
1040 } else if (!sas_device->starget) {
1042 * When asyn scanning is enabled, its not possible to remove
1043 * devices while scanning is turned on due to an oops in
1044 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1046 if (!ioc->is_driver_loading) {
1047 mpt3sas_transport_port_remove(ioc,
1048 sas_device->sas_address,
1049 sas_device->sas_address_parent,
1051 _scsih_sas_device_remove(ioc, sas_device);
1054 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1058 * _scsih_sas_device_init_add - insert sas_device to the list.
1059 * @ioc: per adapter object
1060 * @sas_device: the sas_device object
1061 * Context: This function will acquire ioc->sas_device_lock.
1063 * Adding new object at driver load time to the ioc->sas_device_init_list.
1066 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1067 struct _sas_device *sas_device)
1069 unsigned long flags;
1072 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1073 __func__, sas_device->handle,
1074 (u64)sas_device->sas_address));
1076 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1079 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1080 sas_device_get(sas_device);
1081 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1082 _scsih_determine_boot_device(ioc, sas_device, 0);
1083 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1087 static struct _pcie_device *
1088 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1090 struct _pcie_device *pcie_device;
1092 assert_spin_locked(&ioc->pcie_device_lock);
1094 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1095 if (pcie_device->wwid == wwid)
1098 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1099 if (pcie_device->wwid == wwid)
1105 pcie_device_get(pcie_device);
1111 * mpt3sas_get_pdev_by_wwid - pcie device search
1112 * @ioc: per adapter object
1115 * Context: This function will acquire ioc->pcie_device_lock and will release
1116 * before returning the pcie_device object.
1118 * This searches for pcie_device based on wwid, then return pcie_device object.
1120 static struct _pcie_device *
1121 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1123 struct _pcie_device *pcie_device;
1124 unsigned long flags;
1126 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1127 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1128 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1134 static struct _pcie_device *
1135 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1138 struct _pcie_device *pcie_device;
1140 assert_spin_locked(&ioc->pcie_device_lock);
1142 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1143 if (pcie_device->id == id && pcie_device->channel == channel)
1146 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1147 if (pcie_device->id == id && pcie_device->channel == channel)
1153 pcie_device_get(pcie_device);
1157 static struct _pcie_device *
1158 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1160 struct _pcie_device *pcie_device;
1162 assert_spin_locked(&ioc->pcie_device_lock);
1164 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1165 if (pcie_device->handle == handle)
1168 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1169 if (pcie_device->handle == handle)
1175 pcie_device_get(pcie_device);
1181 * mpt3sas_get_pdev_by_handle - pcie device search
1182 * @ioc: per adapter object
1183 * @handle: Firmware device handle
1185 * Context: This function will acquire ioc->pcie_device_lock and will release
1186 * before returning the pcie_device object.
1188 * This searches for pcie_device based on handle, then return pcie_device
1191 struct _pcie_device *
1192 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1194 struct _pcie_device *pcie_device;
1195 unsigned long flags;
1197 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1198 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1199 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1205 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1206 * @ioc: per adapter object
1207 * Context: This function will acquire ioc->pcie_device_lock
1209 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1210 * which has reported maximum among all available NVMe drives.
1211 * Minimum max_shutdown_latency will be six seconds.
1214 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1216 struct _pcie_device *pcie_device;
1217 unsigned long flags;
/* floor: never report less than the default IO-unit shutdown timeout */
1218 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1220 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
/* scan all NVMe drives and keep the largest reported RTD3 entry latency */
1221 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1222 if (pcie_device->shutdown_latency) {
1223 if (shutdown_latency < pcie_device->shutdown_latency)
1225 pcie_device->shutdown_latency;
1228 ioc->max_shutdown_latency = shutdown_latency;
1229 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1233 * _scsih_pcie_device_remove - remove pcie_device from list.
1234 * @ioc: per adapter object
1235 * @pcie_device: the pcie_device object
1236 * Context: This function will acquire ioc->pcie_device_lock.
1238 * If pcie_device is on the list, remove it and decrement its reference count.
1241 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1242 struct _pcie_device *pcie_device)
1244 unsigned long flags;
1245 int was_on_pcie_device_list = 0;
/* set when this drive defined ioc->max_shutdown_latency, forcing a recalc */
1246 u8 update_latency = 0;
1250 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1251 pcie_device->handle, (u64)pcie_device->wwid);
1252 if (pcie_device->enclosure_handle != 0)
1253 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1254 (u64)pcie_device->enclosure_logical_id,
1256 if (pcie_device->connector_name[0] != '\0')
1257 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1258 pcie_device->enclosure_level,
1259 pcie_device->connector_name);
/* unlink under the lock; remember whether it was actually listed */
1261 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1262 if (!list_empty(&pcie_device->list)) {
1263 list_del_init(&pcie_device->list);
1264 was_on_pcie_device_list = 1;
1266 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1268 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* drop the list's reference only if we removed it from the list */
1269 if (was_on_pcie_device_list) {
1270 kfree(pcie_device->serial_number);
1271 pcie_device_put(pcie_device);
1275 * This device's RTD3 Entry Latency matches IOC's
1276 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1277 * from the available drives as current drive is getting removed.
1280 _scsih_set_nvme_max_shutdown_latency(ioc);
1285 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1286 * @ioc: per adapter object
1287 * @handle: device handle
1290 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1292 struct _pcie_device *pcie_device;
1293 unsigned long flags;
1294 int was_on_pcie_device_list = 0;
1295 u8 update_latency = 0;
/* no device teardown while the host is mid-recovery */
1297 if (ioc->shost_recovery)
1300 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1301 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1303 if (!list_empty(&pcie_device->list)) {
1304 list_del_init(&pcie_device->list);
1305 was_on_pcie_device_list = 1;
/* drops the list's reference; the lookup's reference is still held */
1306 pcie_device_put(pcie_device);
1308 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1311 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1312 if (was_on_pcie_device_list) {
1313 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
/* releases the reference taken by __mpt3sas_get_pdev_by_handle() */
1314 pcie_device_put(pcie_device);
1318 * This device's RTD3 Entry Latency matches IOC's
1319 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1320 * from the available drives as current drive is getting removed.
1323 _scsih_set_nvme_max_shutdown_latency(ioc);
1327 * _scsih_pcie_device_add - add pcie_device object
1328 * @ioc: per adapter object
1329 * @pcie_device: pcie_device object
1331 * This is added to the pcie_device_list link list.
1334 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1335 struct _pcie_device *pcie_device)
1337 unsigned long flags;
1340 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1342 pcie_device->handle, (u64)pcie_device->wwid));
1343 if (pcie_device->enclosure_handle != 0)
1345 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1347 (u64)pcie_device->enclosure_logical_id,
1348 pcie_device->slot));
1349 if (pcie_device->connector_name[0] != '\0')
1351 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1352 __func__, pcie_device->enclosure_level,
1353 pcie_device->connector_name));
/* the list owns a reference of its own */
1355 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1356 pcie_device_get(pcie_device);
1357 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1358 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* a BLOCKED device is tracked but not exposed to the SCSI midlayer */
1360 if (pcie_device->access_status ==
1361 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1362 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
/* on scsi_add_device() failure, undo the list insertion above */
1365 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1366 _scsih_pcie_device_remove(ioc, pcie_device);
1367 } else if (!pcie_device->starget) {
1368 if (!ioc->is_driver_loading) {
1369 /*TODO-- Need to find out whether this condition will occur or not*/
1370 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1373 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1377 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1378 * @ioc: per adapter object
1379 * @pcie_device: the pcie_device object
1380 * Context: This function will acquire ioc->pcie_device_lock.
1382 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1385 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1386 struct _pcie_device *pcie_device)
1388 unsigned long flags;
1391 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1393 pcie_device->handle, (u64)pcie_device->wwid));
1394 if (pcie_device->enclosure_handle != 0)
1396 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1398 (u64)pcie_device->enclosure_logical_id,
1399 pcie_device->slot));
1400 if (pcie_device->connector_name[0] != '\0')
1402 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1403 __func__, pcie_device->enclosure_level,
1404 pcie_device->connector_name));
/* init list owns a reference; boot-device selection is skipped for
 * BLOCKED devices since they are never exposed to the OS */
1406 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1407 pcie_device_get(pcie_device);
1408 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1409 if (pcie_device->access_status !=
1410 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1411 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1412 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1415 * _scsih_raid_device_find_by_id - raid device search
1416 * @ioc: per adapter object
1417 * @id: sas device target id
1418 * @channel: sas device channel
1419 * Context: Calling function should acquire ioc->raid_device_lock
1421 * This searches for raid_device based on target id, then return raid_device
1424 static struct _raid_device *
1425 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
/* r accumulates the match (NULL when none); caller holds raid_device_lock */
1427 struct _raid_device *raid_device, *r;
1430 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1431 if (raid_device->id == id && raid_device->channel == channel) {
1442 * mpt3sas_raid_device_find_by_handle - raid device search
1443 * @ioc: per adapter object
1444 * @handle: sas device handle (assigned by firmware)
1445 * Context: Calling function should acquire ioc->raid_device_lock
1447 * This searches for raid_device based on handle, then return raid_device
1450 struct _raid_device *
1451 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* r accumulates the match (NULL when none); caller holds raid_device_lock */
1453 struct _raid_device *raid_device, *r;
1456 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1457 if (raid_device->handle != handle)
1468 * _scsih_raid_device_find_by_wwid - raid device search
1469 * @ioc: per adapter object
1471 * Context: Calling function should acquire ioc->raid_device_lock
1473 * This searches for raid_device based on wwid, then return raid_device
1476 static struct _raid_device *
1477 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
/* r accumulates the match (NULL when none); caller holds raid_device_lock */
1479 struct _raid_device *raid_device, *r;
1482 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1483 if (raid_device->wwid != wwid)
1494 * _scsih_raid_device_add - add raid_device object
1495 * @ioc: per adapter object
1496 * @raid_device: raid_device object
1498 * This is added to the raid_device_list link list.
1501 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1502 struct _raid_device *raid_device)
1504 unsigned long flags;
1507 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1509 raid_device->handle, (u64)raid_device->wwid));
/* no refcounting on raid devices — the list is the sole owner */
1511 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1512 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1513 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1517 * _scsih_raid_device_remove - delete raid_device object
1518 * @ioc: per adapter object
1519 * @raid_device: raid_device object
1523 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1524 struct _raid_device *raid_device)
1526 unsigned long flags;
/* unlink under the lock; freeing happens outside this visible span */
1528 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1529 list_del(&raid_device->list);
1531 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1535 * mpt3sas_scsih_expander_find_by_handle - expander device search
1536 * @ioc: per adapter object
1537 * @handle: expander handle (assigned by firmware)
1538 * Context: Calling function should acquire ioc->sas_device_lock
1540 * This searches for expander device based on handle, then returns the
1544 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* r accumulates the match (NULL when none); caller holds the node lock */
1546 struct _sas_node *sas_expander, *r;
1549 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1550 if (sas_expander->handle != handle)
1560 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1561 * @ioc: per adapter object
1562 * @handle: enclosure handle (assigned by firmware)
1563 * Context: Calling function should acquire ioc->sas_device_lock
1565 * This searches for enclosure device based on handle, then returns the
1568 static struct _enclosure_node *
1569 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1571 struct _enclosure_node *enclosure_dev, *r;
/* EnclosureHandle is little-endian in the firmware page; convert to cpu
 * byte order before comparing */
1574 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1575 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1584 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1585 * @ioc: per adapter object
1586 * @sas_address: sas address
1587 * @port: hba port entry
1588 * Context: Calling function should acquire ioc->sas_node_lock.
1590 * This searches for expander device based on sas_address & port number,
1591 * then returns the sas_node object.
1594 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1595 u64 sas_address, struct hba_port *port)
1597 struct _sas_node *sas_expander, *r = NULL;
/* both the SAS address and the HBA port must match */
1602 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1603 if (sas_expander->sas_address != sas_address)
1605 if (sas_expander->port != port)
1615 * _scsih_expander_node_add - insert expander device to the list.
1616 * @ioc: per adapter object
1617 * @sas_expander: the sas_device object
1618 * Context: This function will acquire ioc->sas_node_lock.
1620 * Adding new object to the ioc->sas_expander_list.
1623 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1624 struct _sas_node *sas_expander)
1626 unsigned long flags;
/* simple locked append; expanders are not reference counted */
1628 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1629 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1630 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1634 * _scsih_is_end_device - determines if device is an end device
1635 * @device_info: bitfield providing information about the device.
1638 * Return: 1 if end device.
1641 _scsih_is_end_device(u32 device_info)
/* end device := END_DEVICE bit set AND at least one of SSP/STP/SATA
 * target bits set (bitwise | is intentional — operands are masked bits) */
1643 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1644 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1645 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1646 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1653 * _scsih_is_nvme_pciescsi_device - determines if
1654 * device is an pcie nvme/scsi device
1655 * @device_info: bitfield providing information about the device.
1658 * Returns 1 if device is pcie device type nvme/scsi.
1661 _scsih_is_nvme_pciescsi_device(u32 device_info)
/* mask off the device-type field and accept either NVMe or SCSI types */
1663 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1664 == MPI26_PCIE_DEVINFO_NVME) ||
1665 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1666 == MPI26_PCIE_DEVINFO_SCSI))
1673 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1674 * @ioc: per adapter object
1677 * Context: This function will acquire ioc->scsi_lookup_lock.
1679 * This will search for a matching channel:id in the scsi_lookup array,
1680 * returning 1 if found.
1683 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1687 struct scsi_cmnd *scmd;
/* walk every possible smid; lookup_get returns NULL for free slots */
1690 smid <= ioc->shost->can_queue; smid++) {
1691 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1694 if (scmd->device->id == id &&
1695 scmd->device->channel == channel)
1702 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1703 * @ioc: per adapter object
1707 * Context: This function will acquire ioc->scsi_lookup_lock.
1709 * This will search for a matching channel:id:lun in the scsi_lookup array,
1710 * returning 1 if found.
1713 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1714 unsigned int lun, int channel)
1717 struct scsi_cmnd *scmd;
/* same scan as find_by_target but additionally matches the LUN */
1719 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1721 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1724 if (scmd->device->id == id &&
1725 scmd->device->channel == channel &&
1726 scmd->device->lun == lun)
1733 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1734 * @ioc: per adapter object
1735 * @smid: system request message index
1737 * Return: the smid stored scmd pointer.
1738 * Then will dereference the stored scmd pointer.
1741 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1743 struct scsi_cmnd *scmd = NULL;
1744 struct scsiio_tracker *st;
1745 Mpi25SCSIIORequest_t *mpi_request;
/* only SCSI-IO smids (excluding the internal-command tail) map to scmds */
1748 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
/* block-layer tags are zero-based; smids are one-based */
1749 u32 unique_tag = smid - 1;
1751 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1754 * If SCSI IO request is outstanding at driver level then
1755 * DevHandle filed must be non-zero. If DevHandle is zero
1756 * then it means that this smid is free at driver level,
1759 if (!mpi_request->DevHandle)
1762 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1764 st = scsi_cmd_priv(scmd);
/* tracker not initialized => command not really outstanding */
1765 if (st->cb_idx == 0xFF || st->smid == 0)
1773 * scsih_change_queue_depth - setting device queue depth
1774 * @sdev: scsi device struct
1775 * @qdepth: requested queue depth
1777 * Return: queue depth.
1780 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1782 struct Scsi_Host *shost = sdev->host;
1784 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1785 struct MPT3SAS_DEVICE *sas_device_priv_data;
1786 struct MPT3SAS_TARGET *sas_target_priv_data;
1787 struct _sas_device *sas_device;
1788 unsigned long flags;
/* start from the host-wide ceiling, then narrow for special cases */
1790 max_depth = shost->can_queue;
1793 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1796 if (ioc->enable_sdev_max_qd)
/* walk priv data defensively — any missing link keeps the default depth */
1799 sas_device_priv_data = sdev->hostdata;
1800 if (!sas_device_priv_data)
1802 sas_target_priv_data = sas_device_priv_data->sas_target;
1803 if (!sas_target_priv_data)
/* RAID volumes keep the host-wide depth */
1805 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1808 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1809 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
/* SATA end devices are clamped to the SATA queue depth */
1811 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1812 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1814 sas_device_put(sas_device);
1816 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1820 if (!sdev->tagged_supported)
1822 if (qdepth > max_depth)
1824 scsi_change_queue_depth(sdev, qdepth);
1825 sdev_printk(KERN_INFO, sdev,
1826 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1827 sdev->queue_depth, sdev->tagged_supported,
1828 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1829 return sdev->queue_depth;
1833 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1834 * @sdev: scsi device struct
1835 * @qdepth: requested queue depth
1840 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1842 struct Scsi_Host *shost = sdev->host;
1843 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* when sdev_max_qd is enabled, always request the host-wide maximum and
 * let scsih_change_queue_depth() apply the per-device clamps */
1845 if (ioc->enable_sdev_max_qd)
1846 qdepth = shost->can_queue;
1848 scsih_change_queue_depth(sdev, qdepth);
1852 * scsih_target_alloc - target add routine
1853 * @starget: scsi target struct
1855 * Return: 0 if ok. Any other return is assumed to be an error and
1856 * the device is ignored.
1859 scsih_target_alloc(struct scsi_target *starget)
1861 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1862 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1863 struct MPT3SAS_TARGET *sas_target_priv_data;
1864 struct _sas_device *sas_device;
1865 struct _raid_device *raid_device;
1866 struct _pcie_device *pcie_device;
1867 unsigned long flags;
1868 struct sas_rphy *rphy;
/* per-target private data, freed in scsih_target_destroy() */
1870 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1872 if (!sas_target_priv_data)
1875 starget->hostdata = sas_target_priv_data;
1876 sas_target_priv_data->starget = starget;
1877 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
/* RAID volumes: bind priv data to the matching raid_device entry */
1880 if (starget->channel == RAID_CHANNEL) {
1881 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1882 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1885 sas_target_priv_data->handle = raid_device->handle;
1886 sas_target_priv_data->sas_address = raid_device->wwid;
1887 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1888 if (ioc->is_warpdrive)
1889 sas_target_priv_data->raid_device = raid_device;
1890 raid_device->starget = starget;
1892 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* NVMe/PCIe devices: lookup takes a reference kept in pcie_dev until
 * scsih_target_destroy() */
1897 if (starget->channel == PCIE_CHANNEL) {
1898 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1899 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1902 sas_target_priv_data->handle = pcie_device->handle;
1903 sas_target_priv_data->sas_address = pcie_device->wwid;
1904 sas_target_priv_data->port = NULL;
1905 sas_target_priv_data->pcie_dev = pcie_device;
1906 pcie_device->starget = starget;
1907 pcie_device->id = starget->id;
1908 pcie_device->channel = starget->channel;
1909 sas_target_priv_data->flags |=
1910 MPT_TARGET_FLAGS_PCIE_DEVICE;
1911 if (pcie_device->fast_path)
1912 sas_target_priv_data->flags |=
1913 MPT_TARGET_FASTPATH_IO;
1915 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1919 /* sas/sata devices */
1920 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1921 rphy = dev_to_rphy(starget->dev.parent);
1922 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1925 sas_target_priv_data->handle = sas_device->handle;
1926 sas_target_priv_data->sas_address = sas_device->sas_address;
1927 sas_target_priv_data->port = sas_device->port;
1928 sas_target_priv_data->sas_dev = sas_device;
1929 sas_device->starget = starget;
1930 sas_device->id = starget->id;
1931 sas_device->channel = starget->channel;
/* devices in pd_handles are RAID components, hidden from uld attach */
1932 if (test_bit(sas_device->handle, ioc->pd_handles))
1933 sas_target_priv_data->flags |=
1934 MPT_TARGET_FLAGS_RAID_COMPONENT;
1935 if (sas_device->fast_path)
1936 sas_target_priv_data->flags |=
1937 MPT_TARGET_FASTPATH_IO;
1939 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1945 * scsih_target_destroy - target destroy routine
1946 * @starget: scsi target struct
1949 scsih_target_destroy(struct scsi_target *starget)
1951 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1952 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1953 struct MPT3SAS_TARGET *sas_target_priv_data;
1954 struct _sas_device *sas_device;
1955 struct _raid_device *raid_device;
1956 struct _pcie_device *pcie_device;
1957 unsigned long flags;
1959 sas_target_priv_data = starget->hostdata;
1960 if (!sas_target_priv_data)
/* RAID volume: just detach the starget/sdev back pointers */
1963 if (starget->channel == RAID_CHANNEL) {
1964 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1965 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1968 raid_device->starget = NULL;
1969 raid_device->sdev = NULL;
1971 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* NVMe/PCIe: clear back pointer only if it still refers to us */
1975 if (starget->channel == PCIE_CHANNEL) {
1976 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1977 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1978 sas_target_priv_data);
1979 if (pcie_device && (pcie_device->starget == starget) &&
1980 (pcie_device->id == starget->id) &&
1981 (pcie_device->channel == starget->channel))
1982 pcie_device->starget = NULL;
1986 * Corresponding get() is in _scsih_target_alloc()
/* double put: one for the lookup above, one for target_alloc's get */
1988 sas_target_priv_data->pcie_dev = NULL;
1989 pcie_device_put(pcie_device);
1990 pcie_device_put(pcie_device);
1992 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1996 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1997 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1998 if (sas_device && (sas_device->starget == starget) &&
1999 (sas_device->id == starget->id) &&
2000 (sas_device->channel == starget->channel))
2001 sas_device->starget = NULL;
2005 * Corresponding get() is in _scsih_target_alloc()
/* double put mirrors the PCIe branch above */
2007 sas_target_priv_data->sas_dev = NULL;
2008 sas_device_put(sas_device);
2010 sas_device_put(sas_device);
2012 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2015 kfree(sas_target_priv_data);
2016 starget->hostdata = NULL;
2020 * scsih_slave_alloc - device add routine
2021 * @sdev: scsi device struct
2023 * Return: 0 if ok. Any other return is assumed to be an error and
2024 * the device is ignored.
2027 scsih_slave_alloc(struct scsi_device *sdev)
2029 struct Scsi_Host *shost;
2030 struct MPT3SAS_ADAPTER *ioc;
2031 struct MPT3SAS_TARGET *sas_target_priv_data;
2032 struct MPT3SAS_DEVICE *sas_device_priv_data;
2033 struct scsi_target *starget;
2034 struct _raid_device *raid_device;
2035 struct _sas_device *sas_device;
2036 struct _pcie_device *pcie_device;
2037 unsigned long flags;
/* per-LUN private data, freed in scsih_slave_destroy() */
2039 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2041 if (!sas_device_priv_data)
2044 sas_device_priv_data->lun = sdev->lun;
2045 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2047 starget = scsi_target(sdev);
2048 sas_target_priv_data = starget->hostdata;
2049 sas_target_priv_data->num_luns++;
2050 sas_device_priv_data->sas_target = sas_target_priv_data;
2051 sdev->hostdata = sas_device_priv_data;
/* hide RAID component devices from upper-level drivers */
2052 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2053 sdev->no_uld_attach = 1;
2055 shost = dev_to_shost(&starget->dev);
2056 ioc = shost_priv(shost);
2057 if (starget->channel == RAID_CHANNEL) {
2058 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2059 raid_device = _scsih_raid_device_find_by_id(ioc,
2060 starget->id, starget->channel);
2062 raid_device->sdev = sdev; /* raid is single lun */
2063 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* re-establish starget back pointer if the device lost it */
2065 if (starget->channel == PCIE_CHANNEL) {
2066 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2067 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2068 sas_target_priv_data->sas_address);
2069 if (pcie_device && (pcie_device->starget == NULL)) {
2070 sdev_printk(KERN_INFO, sdev,
2071 "%s : pcie_device->starget set to starget @ %d\n",
2072 __func__, __LINE__);
2073 pcie_device->starget = starget;
2077 pcie_device_put(pcie_device);
2078 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2080 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2081 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2082 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2083 sas_target_priv_data->sas_address,
2084 sas_target_priv_data->port);
2085 if (sas_device && (sas_device->starget == NULL)) {
2086 sdev_printk(KERN_INFO, sdev,
2087 "%s : sas_device->starget set to starget @ %d\n",
2088 __func__, __LINE__);
2089 sas_device->starget = starget;
2093 sas_device_put(sas_device);
2095 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2102 * scsih_slave_destroy - device destroy routine
2103 * @sdev: scsi device struct
2106 scsih_slave_destroy(struct scsi_device *sdev)
2108 struct MPT3SAS_TARGET *sas_target_priv_data;
2109 struct scsi_target *starget;
2110 struct Scsi_Host *shost;
2111 struct MPT3SAS_ADAPTER *ioc;
2112 struct _sas_device *sas_device;
2113 struct _pcie_device *pcie_device;
2114 unsigned long flags;
2116 if (!sdev->hostdata)
2119 starget = scsi_target(sdev);
2120 sas_target_priv_data = starget->hostdata;
2121 sas_target_priv_data->num_luns--;
2123 shost = dev_to_shost(&starget->dev);
2124 ioc = shost_priv(shost);
/* clear the device's starget back pointer once the last LUN goes away */
2126 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2127 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2128 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2129 sas_target_priv_data);
2130 if (pcie_device && !sas_target_priv_data->num_luns)
2131 pcie_device->starget = NULL;
/* balances the reference taken by the lookup above */
2134 pcie_device_put(pcie_device);
2136 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2138 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2139 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2140 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2141 sas_target_priv_data);
2142 if (sas_device && !sas_target_priv_data->num_luns)
2143 sas_device->starget = NULL;
2146 sas_device_put(sas_device);
2147 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2150 kfree(sdev->hostdata);
2151 sdev->hostdata = NULL;
2155 * _scsih_display_sata_capabilities - sata capabilities
2156 * @ioc: per adapter object
2157 * @handle: device handle
2158 * @sdev: scsi device struct
2161 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2162 u16 handle, struct scsi_device *sdev)
2164 Mpi2ConfigReply_t mpi_reply;
2165 Mpi2SasDevicePage0_t sas_device_pg0;
/* fetch SAS Device Page 0 for this handle; bail on any config error */
2170 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2171 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2172 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2173 __FILE__, __LINE__, __func__);
2177 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2178 MPI2_IOCSTATUS_MASK;
2179 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2180 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2181 __FILE__, __LINE__, __func__);
/* decode little-endian page fields into cpu order before testing bits */
2185 flags = le16_to_cpu(sas_device_pg0.Flags);
2186 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2188 sdev_printk(KERN_INFO, sdev,
2189 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2190 "sw_preserve(%s)\n",
2191 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2192 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2193 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2195 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2196 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2197 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2201 * raid transport support -
2202 * Enabled for SLES11 and newer, in older kernels the driver will panic when
2203 * unloading the driver followed by a load - I believe that the subroutine
2204 * raid_class_release() is not cleaning up properly.
2208 * scsih_is_raid - return boolean indicating device is raid volume
2209 * @dev: the device struct object
2212 scsih_is_raid(struct device *dev)
2214 struct scsi_device *sdev = to_scsi_device(dev);
2215 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
/* warpdrive controllers do not expose raid transport attributes */
2217 if (ioc->is_warpdrive)
2219 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
/* scsih_is_nvme - return 1 when the device sits on the NVMe/PCIe channel */
2223 scsih_is_nvme(struct device *dev)
2225 struct scsi_device *sdev = to_scsi_device(dev);
2227 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2231 * scsih_get_resync - get raid volume resync percent complete
2232 * @dev: the device struct object
2235 scsih_get_resync(struct device *dev)
2237 struct scsi_device *sdev = to_scsi_device(dev);
2238 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
/* NOTE(review): 'static' here makes raid_device shared across all calls
 * and adapters — looks unnecessary and potentially racy; confirm intent */
2239 static struct _raid_device *raid_device;
2240 unsigned long flags;
2241 Mpi2RaidVolPage0_t vol_pg0;
2242 Mpi2ConfigReply_t mpi_reply;
2243 u32 volume_status_flags;
2244 u8 percent_complete;
2247 percent_complete = 0;
/* warpdrive does not report resync progress */
2249 if (ioc->is_warpdrive)
2252 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2253 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2256 handle = raid_device->handle;
2257 percent_complete = raid_device->percent_complete;
2259 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* read volume page 0 to verify a resync is actually in progress */
2264 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2265 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2266 sizeof(Mpi2RaidVolPage0_t))) {
2267 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2268 __FILE__, __LINE__, __func__);
2269 percent_complete = 0;
2273 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2274 if (!(volume_status_flags &
2275 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2276 percent_complete = 0;
/* report through the raid template matching the controller generation */
2280 switch (ioc->hba_mpi_version_belonged) {
2282 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2286 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2292 * scsih_get_state - get raid volume level
2293 * @dev: the device struct object
2296 scsih_get_state(struct device *dev)
2298 struct scsi_device *sdev = to_scsi_device(dev);
2299 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
/* NOTE(review): 'static' local shared across calls — confirm intent,
 * mirrors the same pattern in scsih_get_resync() */
2300 static struct _raid_device *raid_device;
2301 unsigned long flags;
2302 Mpi2RaidVolPage0_t vol_pg0;
2303 Mpi2ConfigReply_t mpi_reply;
2305 enum raid_state state = RAID_STATE_UNKNOWN;
2308 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2309 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2312 handle = raid_device->handle;
2313 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2318 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2319 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2320 sizeof(Mpi2RaidVolPage0_t))) {
2321 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2322 __FILE__, __LINE__, __func__);
/* an active resync overrides the steady-state volume state */
2326 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2327 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2328 state = RAID_STATE_RESYNCING;
/* map firmware volume state onto the generic raid transport states */
2332 switch (vol_pg0.VolumeState) {
2333 case MPI2_RAID_VOL_STATE_OPTIMAL:
2334 case MPI2_RAID_VOL_STATE_ONLINE:
2335 state = RAID_STATE_ACTIVE;
2337 case MPI2_RAID_VOL_STATE_DEGRADED:
2338 state = RAID_STATE_DEGRADED;
2340 case MPI2_RAID_VOL_STATE_FAILED:
2341 case MPI2_RAID_VOL_STATE_MISSING:
2342 state = RAID_STATE_OFFLINE;
2346 switch (ioc->hba_mpi_version_belonged) {
2348 raid_set_state(mpt2sas_raid_template, dev, state);
2352 raid_set_state(mpt3sas_raid_template, dev, state);
2358 * _scsih_set_level - set raid level
2360 * @sdev: scsi device struct
2361 * @volume_type: volume type
2364 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2365 struct scsi_device *sdev, u8 volume_type)
2367 enum raid_level level = RAID_LEVEL_UNKNOWN;
/* translate firmware volume type to the generic raid transport level */
2369 switch (volume_type) {
2370 case MPI2_RAID_VOL_TYPE_RAID0:
2371 level = RAID_LEVEL_0;
2373 case MPI2_RAID_VOL_TYPE_RAID10:
2374 level = RAID_LEVEL_10;
2376 case MPI2_RAID_VOL_TYPE_RAID1E:
2377 level = RAID_LEVEL_1E;
2379 case MPI2_RAID_VOL_TYPE_RAID1:
2380 level = RAID_LEVEL_1;
/* publish through the template matching the controller generation */
2384 switch (ioc->hba_mpi_version_belonged) {
2386 raid_set_level(mpt2sas_raid_template,
2387 &sdev->sdev_gendev, level);
2391 raid_set_level(mpt3sas_raid_template,
2392 &sdev->sdev_gendev, level);
2399 * _scsih_get_volume_capabilities - volume capabilities
2400 * @ioc: per adapter object
2401 * @raid_device: the raid_device object
2403 * Return: 0 for success, else 1
2406 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2407 struct _raid_device *raid_device)
2409 Mpi2RaidVolPage0_t *vol_pg0;
2410 Mpi2RaidPhysDiskPage0_t pd_pg0;
2411 Mpi2SasDevicePage0_t sas_device_pg0;
2412 Mpi2ConfigReply_t mpi_reply;
/* a volume with zero physical disks is treated as a failure */
2416 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2417 &num_pds)) || !num_pds) {
2419 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2420 __FILE__, __LINE__, __func__));
2424 raid_device->num_pds = num_pds;
/* page 0 has a variable-length PhysDisk array: size it for num_pds */
2425 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2426 sizeof(Mpi2RaidVol0PhysDisk_t));
2427 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2430 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2431 __FILE__, __LINE__, __func__));
2435 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2436 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2438 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2439 __FILE__, __LINE__, __func__));
2444 raid_device->volume_type = vol_pg0->VolumeType;
2446 /* figure out what the underlying devices are by
2447 * obtaining the device_info bits for the 1st device
2449 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2450 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2451 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2452 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2453 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2454 le16_to_cpu(pd_pg0.DevHandle)))) {
2455 raid_device->device_info =
2456 le32_to_cpu(sas_device_pg0.DeviceInfo);
2465 * _scsih_enable_tlr - setting TLR flags
2466 * @ioc: per adapter object
2467 * @sdev: scsi device struct
2469 * Enabling Transaction Layer Retries for tape devices when
2470 * vpd page 0x90 is present
2474 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
/* Only tape devices are eligible, and only when the IOC advertises
 * TLR capability in its facts; early returns are in elided lines.
 */
2478 if (sdev->type != TYPE_TAPE)
2481 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2484 sas_enable_tlr(sdev);
/* Log the actual resulting state, not the request — sas_enable_tlr()
 * may decline (queried back via sas_is_tlr_enabled()).
 */
2485 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2486 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2492 * scsih_slave_configure - device configure routine.
2493 * @sdev: scsi device struct
2495 * Return: 0 if ok. Any other return is assumed to be an error and
2496 * the device is ignored.
/* SCSI midlayer ->slave_configure hook. Sets per-device queue depth and
 * request-queue limits based on what kind of object the target is:
 * RAID volume, NVMe (PCIe) device, or plain SAS/SATA end device.
 * NOTE(review): excerpt is elided — returns, braces and some locals
 * (qdepth, ds, r_level, ssp_target, ...) live on missing lines.
 */
2499 scsih_slave_configure(struct scsi_device *sdev)
2501 struct Scsi_Host *shost = sdev->host;
2502 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2503 struct MPT3SAS_DEVICE *sas_device_priv_data;
2504 struct MPT3SAS_TARGET *sas_target_priv_data;
2505 struct _sas_device *sas_device;
2506 struct _pcie_device *pcie_device;
2507 struct _raid_device *raid_device;
2508 unsigned long flags;
2513 u16 handle, volume_handle = 0;
2514 u64 volume_wwid = 0;
/* hostdata was attached at slave_alloc time; clear the INIT flag now
 * that configuration is running.
 */
2517 sas_device_priv_data = sdev->hostdata;
2518 sas_device_priv_data->configured_lun = 1;
2519 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2520 sas_target_priv_data = sas_device_priv_data->sas_target;
2521 handle = sas_target_priv_data->handle;
2523 /* raid volume handling */
2524 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2526 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2527 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2528 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2531 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2532 __FILE__, __LINE__, __func__));
2536 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2538 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2539 __FILE__, __LINE__, __func__));
2544 * WARPDRIVE: Initialize the required data for Direct IO
2546 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2548 /* RAID Queue Depth Support
2549 * IS volume = underlying qdepth of drive type, either
2550 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2551 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2553 if (raid_device->device_info &
2554 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2555 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2558 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2559 if (raid_device->device_info &
2560 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
/* Per-volume-type handling; also chooses the r_level/ds display
 * strings on elided lines.
 */
2566 switch (raid_device->volume_type) {
2567 case MPI2_RAID_VOL_TYPE_RAID0:
2570 case MPI2_RAID_VOL_TYPE_RAID1E:
2571 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
/* OEM-specific: display RAID1E as "RAID10" when the manufacturing
 * page requests it and the PD count is even.
 */
2572 if (ioc->manu_pg10.OEMIdentifier &&
2573 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2574 MFG10_GF0_R10_DISPLAY) &&
2575 !(raid_device->num_pds % 2))
2580 case MPI2_RAID_VOL_TYPE_RAID1:
2581 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2584 case MPI2_RAID_VOL_TYPE_RAID10:
2585 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2588 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2590 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2595 if (!ioc->hide_ir_msg)
2596 sdev_printk(KERN_INFO, sdev,
2597 "%s: handle(0x%04x), wwid(0x%016llx),"
2598 " pd_count(%d), type(%s)\n",
2599 r_level, raid_device->handle,
2600 (unsigned long long)raid_device->wwid,
2601 raid_device->num_pds, ds);
/* Cap per-request size for volumes to the RAID maximum. */
2603 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2604 blk_queue_max_hw_sectors(sdev->request_queue,
2605 MPT3SAS_RAID_MAX_SECTORS);
2606 sdev_printk(KERN_INFO, sdev,
2607 "Set queue's max_sector to: %u\n",
2608 MPT3SAS_RAID_MAX_SECTORS);
2611 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2613 /* raid transport support */
2614 if (!ioc->is_warpdrive)
2615 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2619 /* non-raid handling */
/* Hidden RAID component: record its parent volume handle/wwid so TM
 * (device reset) can be redirected to the volume later.
 */
2620 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2621 if (mpt3sas_config_get_volume_handle(ioc, handle,
2624 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2625 __FILE__, __LINE__, __func__));
2628 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2629 volume_handle, &volume_wwid)) {
2631 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2632 __FILE__, __LINE__, __func__));
/* PCIe (NVMe) device path. */
2638 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2639 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2640 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2641 sas_device_priv_data->sas_target->sas_address);
2643 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2645 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2646 __FILE__, __LINE__, __func__));
2650 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2652 sdev_printk(KERN_INFO, sdev,
2653 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2654 ds, handle, (unsigned long long)pcie_device->wwid,
2655 pcie_device->port_num);
2656 if (pcie_device->enclosure_handle != 0)
2657 sdev_printk(KERN_INFO, sdev,
2658 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2660 (unsigned long long)pcie_device->enclosure_logical_id,
2662 if (pcie_device->connector_name[0] != '\0')
2663 sdev_printk(KERN_INFO, sdev,
2664 "%s: enclosure level(0x%04x),"
2665 "connector name( %s)\n", ds,
2666 pcie_device->enclosure_level,
2667 pcie_device->connector_name);
/* nvme_mdts is in bytes; convert to 512-byte sectors for the
 * block layer limit.
 */
2669 if (pcie_device->nvme_mdts)
2670 blk_queue_max_hw_sectors(sdev->request_queue,
2671 pcie_device->nvme_mdts/512);
2673 pcie_device_put(pcie_device);
2674 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2675 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2676 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2677 ** merged and can eliminate holes created during merging
2680 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2681 sdev->request_queue);
/* Segments must not cross an ioc->page_size boundary — presumably an
 * NVMe PRP constraint; confirm against the full driver source.
 */
2682 blk_queue_virt_boundary(sdev->request_queue,
2683 ioc->page_size - 1);
/* Plain SAS/SATA end device path. */
2687 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2688 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2689 sas_device_priv_data->sas_target->sas_address,
2690 sas_device_priv_data->sas_target->port);
2692 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2694 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2695 __FILE__, __LINE__, __func__));
2699 sas_device->volume_handle = volume_handle;
2700 sas_device->volume_wwid = volume_wwid;
2701 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2702 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
/* SES enclosure processors must not be delayed on removal. */
2704 if (sas_device->device_info &
2705 MPI2_SAS_DEVICE_INFO_SEP) {
2706 sdev_printk(KERN_WARNING, sdev,
2707 "set ignore_delay_remove for handle(0x%04x)\n",
2708 sas_device_priv_data->sas_target->handle);
2709 sas_device_priv_data->ignore_delay_remove = 1;
2714 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2715 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2717 else if (sas_device->device_info &
2718 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2722 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2723 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2724 ds, handle, (unsigned long long)sas_device->sas_address,
2725 sas_device->phy, (unsigned long long)sas_device->device_name);
2727 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2729 sas_device_put(sas_device);
2730 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2733 _scsih_display_sata_capabilities(ioc, handle, sdev);
2736 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
/* For SSP targets only (guard on elided lines): read the port mode
 * page and enable TLR where supported.
 */
2739 sas_read_port_mode_page(sdev);
2740 _scsih_enable_tlr(ioc, sdev);
2747 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2748 * @sdev: scsi device struct
2749 * @bdev: pointer to block device context
2750 * @capacity: device size (in 512 byte sectors)
2751 * @params: three element array to place output:
2752 * params[0] number of heads (max 255)
2753 * params[1] number of sectors (max 63)
2754 * params[2] number of cylinders
/* Standard BIOS geometry fabrication: heads/sectors are fixed
 * constants (assigned on elided lines), cylinders derived from
 * capacity via 64-bit-safe sector_div().
 */
2757 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2758 sector_t capacity, int params[])
2768 dummy = heads * sectors;
2769 cylinders = capacity;
2770 sector_div(cylinders, dummy);
2773 * Handle extended translation size for logical drives
/* >= 1 GB (0x200000 sectors * 512): switch to extended geometry and
 * recompute cylinders (new heads/sectors set on elided lines).
 */
2776 if ((ulong)capacity >= 0x200000) {
2779 dummy = heads * sectors;
2780 cylinders = capacity;
2781 sector_div(cylinders, dummy);
2786 params[1] = sectors;
2787 params[2] = cylinders;
2793 * _scsih_response_code - translation of device response code
2794 * @ioc: per adapter object
2795 * @response_code: response code returned by the device
/* Pure diagnostic helper: translate an MPI2 task-management response
 * code into human-readable text and log it at warn level.
 */
2798 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2802 switch (response_code) {
2803 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2804 desc = "task management request completed";
2806 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2807 desc = "invalid frame";
2809 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2810 desc = "task management request not supported";
2812 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2813 desc = "task management request failed";
2815 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2816 desc = "task management request succeeded";
2818 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2819 desc = "invalid lun";
/* 0xA (overlapped tag) — case label elided in this excerpt. */
2822 desc = "overlapped tag attempted";
2824 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2825 desc = "task queued, however not sent to target";
2831 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2835 * _scsih_tm_done - tm completion routine
2836 * @ioc: per adapter object
2837 * @smid: system request message index
2838 * @msix_index: MSIX table index supplied by the OS
2839 * @reply: reply message frame(lower 32bit addr)
2842 * The callback handler when using scsih_issue_tm.
2844 * Return: 1 meaning mf should be freed from _base_interrupt
2845 * 0 means the mf is freed from this function.
/* Runs in interrupt context. Filters out stale/mismatched completions
 * (NOT_USED status or smid mismatch — early returns elided), copies
 * the reply frame into ioc->tm_cmds.reply, then wakes the waiter in
 * mpt3sas_scsih_issue_tm() via complete().
 */
2848 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2850 MPI2DefaultReply_t *mpi_reply;
2852 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2854 if (ioc->tm_cmds.smid != smid)
2856 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2857 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* MsgLength is in 32-bit dwords, hence *4 for the byte count. */
2859 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2860 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2862 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2863 complete(&ioc->tm_cmds.done);
2868 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2869 * @ioc: per adapter object
2870 * @handle: device handle
2872 * During taskmangement request, we need to freeze the device queue.
/* Walks every sdev on the host and marks the target matching @handle
 * tm_busy, so the queuecommand path holds off new I/O while a task
 * management request is outstanding. Paired with
 * mpt3sas_scsih_clear_tm_flag() below.
 */
2875 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2877 struct MPT3SAS_DEVICE *sas_device_priv_data;
2878 struct scsi_device *sdev;
2881 shost_for_each_device(sdev, ioc->shost) {
2884 sas_device_priv_data = sdev->hostdata;
2885 if (!sas_device_priv_data)
2887 if (sas_device_priv_data->sas_target->handle == handle) {
2888 sas_device_priv_data->sas_target->tm_busy = 1;
/* Suppress loginfo noise while TM is in flight. */
2890 ioc->ignore_loginfos = 1;
2896 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2897 * @ioc: per adapter object
2898 * @handle: device handle
2900 * During taskmangement request, we need to freeze the device queue.
/* Mirror of mpt3sas_scsih_set_tm_flag(): clears tm_busy on the target
 * matching @handle and re-enables loginfo reporting once the task
 * management request has finished.
 */
2903 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2905 struct MPT3SAS_DEVICE *sas_device_priv_data;
2906 struct scsi_device *sdev;
2909 shost_for_each_device(sdev, ioc->shost) {
2912 sas_device_priv_data = sdev->hostdata;
2913 if (!sas_device_priv_data)
2915 if (sas_device_priv_data->sas_target->handle == handle) {
2916 sas_device_priv_data->sas_target->tm_busy = 0;
2918 ioc->ignore_loginfos = 0;
2924 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2925 * @ioc: per adapter object
2926 * @channel: the channel assigned by the OS
2927 * @id: the id assigned by the OS
/* @lun: the lun number (parameter doc line elided in this excerpt) */
2929 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2930 * @smid_task: smid assigned to the task
2932 * Look whether TM has aborted the timed out SCSI command, if
2933 * TM has aborted the IO then return SUCCESS else return FAILED.
2936 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2937 uint id, uint lun, u8 type, u16 smid_task)
/* smid within the SCSI I/O range: check whether the driver still has
 * an outstanding command for the target (target reset) or lun
 * (abort task set / lun reset); lookup helpers return non-zero while
 * the I/O is still pending.
 */
2940 if (smid_task <= ioc->shost->can_queue) {
2942 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2943 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2947 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2948 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2949 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
/* Internal driver commands use dedicated smids; success when the
 * command slot is complete or free.
 */
2956 } else if (smid_task == ioc->scsih_cmds.smid) {
2957 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2958 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2960 } else if (smid_task == ioc->ctl_cmds.smid) {
2961 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2962 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2970 * scsih_tm_post_processing - post processing of target & LUN reset
2971 * @ioc: per adapter object
2972 * @handle: device handle
2973 * @channel: the channel assigned by the OS
2974 * @id: the id assigned by the OS
/* @lun: the lun number (parameter doc line elided in this excerpt) */
2976 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2977 * @smid_task: smid assigned to the task
2979 * Post processing of target & LUN reset. Due to interrupt latency
2980 * issue it possible that interrupt for aborted IO might not be
2981 * received yet. So before returning failure status, poll the
2982 * reply descriptor pools for the reply of timed out SCSI command.
2983 * Return FAILED status if reply for timed out is not received
2984 * otherwise return SUCCESS.
2987 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2988 uint channel, uint id, uint lun, u8 type, u16 smid_task)
/* First pass: if the timed-out I/O has already completed, done
 * (early return on an elided line).
 */
2992 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
2997 "Poll ReplyDescriptor queues for completion of"
2998 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
2999 smid_task, type, handle);
3002 * Due to interrupt latency issues, driver may receive interrupt for
3003 * TM first and then for aborted SCSI IO command. So, poll all the
3004 * ReplyDescriptor pools before returning the FAILED status to SML.
/* Mask interrupts while manually draining the reply queues so the
 * poll does not race the ISR, then re-check.
 */
3006 mpt3sas_base_mask_interrupts(ioc);
3007 mpt3sas_base_sync_reply_irqs(ioc, 1);
3008 mpt3sas_base_unmask_interrupts(ioc);
3010 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3014 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3015 * @ioc: per adapter struct
3016 * @handle: device handle
3017 * @channel: the channel assigned by the OS
3018 * @id: the id assigned by the OS
/* @lun: the lun number (parameter doc line elided in this excerpt) */
3020 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3021 * @smid_task: smid assigned to the task
3022 * @msix_task: MSIX table index supplied by the OS
3023 * @timeout: timeout in seconds
3024 * @tr_method: Target Reset Method
3027 * A generic API for sending task management requests to firmware.
3029 * The callback index is set inside `ioc->tm_cb_idx`.
3030 * The caller is responsible to check for outstanding commands.
3032 * Return: SUCCESS or FAILED.
/* Caller must hold ioc->tm_cmds.mutex (see
 * mpt3sas_scsih_issue_locked_tm() for the locked wrapper).
 * NOTE(review): excerpt is elided — several returns, gotos and the
 * 'out' cleanup label are on missing lines.
 */
3035 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3036 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3037 u8 timeout, u8 tr_method)
3039 Mpi2SCSITaskManagementRequest_t *mpi_request;
3040 Mpi2SCSITaskManagementReply_t *mpi_reply;
3041 Mpi25SCSIIORequest_t *request;
3047 lockdep_assert_held(&ioc->tm_cmds.mutex);
/* Refuse when a TM is already in flight or the host is resetting,
 * being removed, or in PCI error recovery.
 */
3049 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3050 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3054 if (ioc->shost_recovery || ioc->remove_host ||
3055 ioc->pci_error_recovery) {
3056 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
/* If the IOC is in a bad state (doorbell in use, FAULT, or COREDUMP),
 * a TM cannot help — escalate straight to a hard reset and report
 * its outcome.
 */
3060 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3061 if (ioc_state & MPI2_DOORBELL_USED) {
3062 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3063 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3064 return (!rc) ? SUCCESS : FAILED;
3067 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3068 mpt3sas_print_fault_code(ioc, ioc_state &
3069 MPI2_DOORBELL_DATA_MASK);
3070 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3071 return (!rc) ? SUCCESS : FAILED;
3072 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3073 MPI2_IOC_STATE_COREDUMP) {
3074 mpt3sas_print_coredump_info(ioc, ioc_state &
3075 MPI2_DOORBELL_DATA_MASK);
3076 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3077 return (!rc) ? SUCCESS : FAILED;
/* TM requests go through the high-priority smid pool. */
3080 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3082 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3087 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3088 handle, type, smid_task, timeout, tr_method));
/* Build the MPI2 TM request frame in place. */
3089 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3090 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3091 ioc->tm_cmds.smid = smid;
3092 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3093 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3094 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3095 mpi_request->DevHandle = cpu_to_le16(handle);
3096 mpi_request->TaskType = type;
/* tr_method (e.g. protocol-level reset for NVMe) only applies to
 * abort/query task types.
 */
3097 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3098 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3099 mpi_request->MsgFlags = tr_method;
3100 mpi_request->TaskMID = cpu_to_le16(smid_task);
3101 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
/* Freeze target I/O, fire the request, wait for _scsih_tm_done(). */
3102 mpt3sas_scsih_set_tm_flag(ioc, handle);
3103 init_completion(&ioc->tm_cmds.done);
3104 ioc->put_smid_hi_priority(ioc, smid, msix_task);
3105 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
/* Timed out: decide whether the timeout warrants a host reset. */
3106 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3107 mpt3sas_check_cmd_timeout(ioc,
3108 ioc->tm_cmds.status, mpi_request,
3109 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3111 rc = mpt3sas_base_hard_reset_handler(ioc,
3113 rc = (!rc) ? SUCCESS : FAILED;
3118 /* sync IRQs in case those were busy during flush. */
3119 mpt3sas_base_sync_reply_irqs(ioc, 0);
3121 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3122 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3123 mpi_reply = ioc->tm_cmds.reply;
3125 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3126 le16_to_cpu(mpi_reply->IOCStatus),
3127 le32_to_cpu(mpi_reply->IOCLogInfo),
3128 le32_to_cpu(mpi_reply->TerminationCount)));
3129 if (ioc->logging_level & MPT_DEBUG_TM) {
3130 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3131 if (mpi_reply->IOCStatus)
3132 _debug_dump_mf(mpi_request,
3133 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3138 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3141 * If DevHandle filed in smid_task's entry of request pool
3142 * doesn't match with device handle on which this task abort
3143 * TM is received then it means that TM has successfully
3144 * aborted the timed out command. Since smid_task's entry in
3145 * request pool will be memset to zero once the timed out
3146 * command is returned to the SML. If the command is not
3147 * aborted then smid_task’s entry won’t be cleared and it
3148 * will have same DevHandle value on which this task abort TM
3149 * is received and driver will return the TM status as FAILED.
3151 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3152 if (le16_to_cpu(request->DevHandle) != handle)
3155 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3156 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3157 handle, timeout, tr_method, smid_task, msix_task);
3161 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3162 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3163 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
/* Resets need the latency-tolerant re-check in
 * scsih_tm_post_processing().
 */
3164 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3167 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
/* Cleanup path (label elided): unfreeze the target and release the
 * tm_cmds slot.
 */
3176 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3177 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
/* Convenience wrapper around mpt3sas_scsih_issue_tm() that takes and
 * releases ioc->tm_cmds.mutex (the unlocked routine asserts it is
 * held). Same parameters and SUCCESS/FAILED return.
 */
3181 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3182 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3183 u16 msix_task, u8 timeout, u8 tr_method)
3187 mutex_lock(&ioc->tm_cmds.mutex);
3188 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3189 smid_task, msix_task, timeout, tr_method);
3190 mutex_unlock(&ioc->tm_cmds.mutex);
3196 * _scsih_tm_display_info - displays info about the device
3197 * @ioc: per adapter struct
3198 * @scmd: pointer to scsi command object
3200 * Called by task management callback handlers.
/* Purely informational: prints the failing command plus identifying
 * details for the device it targeted — RAID volume, PCIe/NVMe device,
 * or SAS end device / hidden RAID component.
 */
3203 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3205 struct scsi_target *starget = scmd->device->sdev_target;
3206 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3207 struct _sas_device *sas_device = NULL;
3208 struct _pcie_device *pcie_device = NULL;
3209 unsigned long flags;
3210 char *device_str = NULL;
/* WarpDrive firmware hides IR volumes; label accordingly. */
3214 if (ioc->hide_ir_msg)
3215 device_str = "WarpDrive";
3217 device_str = "volume";
3219 scsi_print_command(scmd);
3220 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3221 starget_printk(KERN_INFO, starget,
3222 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3223 device_str, priv_target->handle,
3224 device_str, (unsigned long long)priv_target->sas_address);
3226 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3227 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3228 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3230 starget_printk(KERN_INFO, starget,
3231 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3232 pcie_device->handle,
3233 (unsigned long long)pcie_device->wwid,
3234 pcie_device->port_num);
3235 if (pcie_device->enclosure_handle != 0)
3236 starget_printk(KERN_INFO, starget,
3237 "enclosure logical id(0x%016llx), slot(%d)\n",
3238 (unsigned long long)
3239 pcie_device->enclosure_logical_id,
3241 if (pcie_device->connector_name[0] != '\0')
3242 starget_printk(KERN_INFO, starget,
3243 "enclosure level(0x%04x), connector name( %s)\n",
3244 pcie_device->enclosure_level,
3245 pcie_device->connector_name);
3246 pcie_device_put(pcie_device);
3248 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* Default: SAS end device. Hidden RAID components report their
 * parent volume handle/wwid instead of their own address.
 */
3251 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3252 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3254 if (priv_target->flags &
3255 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3256 starget_printk(KERN_INFO, starget,
3257 "volume handle(0x%04x), "
3258 "volume wwid(0x%016llx)\n",
3259 sas_device->volume_handle,
3260 (unsigned long long)sas_device->volume_wwid);
3262 starget_printk(KERN_INFO, starget,
3263 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3265 (unsigned long long)sas_device->sas_address,
3268 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3271 sas_device_put(sas_device);
3273 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3278 * scsih_abort - eh threads main abort routine
3279 * @scmd: pointer to scsi command object
3281 * Return: SUCCESS if command aborted else FAILED
/* SCSI EH ->eh_abort_handler. Issues an ABORT_TASK TM for the timed
 * out command identified by its scsiio_tracker (smid). NOTE(review):
 * excerpt is elided — early returns/gotos and the default timeout
 * assignment are on missing lines.
 */
3284 scsih_abort(struct scsi_cmnd *scmd)
3286 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3287 struct MPT3SAS_DEVICE *sas_device_priv_data;
3288 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3293 struct _pcie_device *pcie_device = NULL;
3294 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3295 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3296 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3297 (scmd->request->timeout / HZ) * 1000);
3298 _scsih_tm_display_info(ioc, scmd);
/* Device already gone: complete the command as NO_CONNECT. */
3300 sas_device_priv_data = scmd->device->hostdata;
3301 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3303 sdev_printk(KERN_INFO, scmd->device,
3304 "device been deleted! scmd(0x%p)\n", scmd);
3305 scmd->result = DID_NO_CONNECT << 16;
3306 scmd->scsi_done(scmd);
3311 /* check for completed command */
3312 if (st == NULL || st->cb_idx == 0xFF) {
3313 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3314 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3315 scmd->result = DID_RESET << 16;
3320 /* for hidden raid components and volumes this is not supported */
3321 if (sas_device_priv_data->sas_target->flags &
3322 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3323 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3324 scmd->result = DID_RESET << 16;
/* Optional debug aid: halt firmware so the failure state can be
 * captured (no-op unless enabled).
 */
3329 mpt3sas_halt_firmware(ioc);
/* NVMe devices without custom TM handling use the longer
 * nvme_abort_timeout.
 */
3331 handle = sas_device_priv_data->sas_target->handle;
3332 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3333 if (pcie_device && (!ioc->tm_custom_handling) &&
3334 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3335 timeout = ioc->nvme_abort_timeout;
3336 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3337 scmd->device->id, scmd->device->lun,
3338 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3339 st->smid, st->msix_io, timeout, 0);
3340 /* Command must be cleared after abort */
3341 if (r == SUCCESS && st->cb_idx != 0xFF)
3344 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3345 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3347 pcie_device_put(pcie_device);
3352 * scsih_dev_reset - eh threads main device reset routine
3353 * @scmd: pointer to scsi command object
3355 * Return: SUCCESS if command aborted else FAILED
/* SCSI EH ->eh_device_reset_handler: issues a LOGICAL_UNIT_RESET TM.
 * For hidden RAID components the reset is redirected to the parent
 * volume handle. NOTE(review): excerpt is elided — some early
 * returns/gotos are on missing lines.
 */
3358 scsih_dev_reset(struct scsi_cmnd *scmd)
3360 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3361 struct MPT3SAS_DEVICE *sas_device_priv_data;
3362 struct _sas_device *sas_device = NULL;
3363 struct _pcie_device *pcie_device = NULL;
3369 struct scsi_target *starget = scmd->device->sdev_target;
3370 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3372 sdev_printk(KERN_INFO, scmd->device,
3373 "attempting device reset! scmd(0x%p)\n", scmd);
3374 _scsih_tm_display_info(ioc, scmd);
/* Device already gone: complete the command as NO_CONNECT. */
3376 sas_device_priv_data = scmd->device->hostdata;
3377 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3379 sdev_printk(KERN_INFO, scmd->device,
3380 "device been deleted! scmd(0x%p)\n", scmd);
3381 scmd->result = DID_NO_CONNECT << 16;
3382 scmd->scsi_done(scmd);
3387 /* for hidden raid components obtain the volume_handle */
3389 if (sas_device_priv_data->sas_target->flags &
3390 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3391 sas_device = mpt3sas_get_sdev_from_target(ioc,
3394 handle = sas_device->volume_handle;
3396 handle = sas_device_priv_data->sas_target->handle;
3399 scmd->result = DID_RESET << 16;
/* NVMe devices use a protocol-level reset method and the device's
 * own reset timeout; SAS uses a link reset.
 */
3404 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3406 if (pcie_device && (!ioc->tm_custom_handling) &&
3407 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3408 tr_timeout = pcie_device->reset_timeout;
3409 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3411 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3413 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3414 scmd->device->id, scmd->device->lun,
3415 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3416 tr_timeout, tr_method);
3417 /* Check for busy commands after reset */
3418 if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
3421 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3422 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3425 sas_device_put(sas_device);
3427 pcie_device_put(pcie_device);
3433 * scsih_target_reset - eh threads main target reset routine
3434 * @scmd: pointer to scsi command object
3436 * Return: SUCCESS if command aborted else FAILED
/* SCSI EH ->eh_target_reset_handler: issues a TARGET_RESET TM.
 * Mirrors scsih_dev_reset() except the TM targets the whole device
 * (lun 0 in the request) and busy-ness is checked per-target.
 */
3439 scsih_target_reset(struct scsi_cmnd *scmd)
3441 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3442 struct MPT3SAS_DEVICE *sas_device_priv_data;
3443 struct _sas_device *sas_device = NULL;
3444 struct _pcie_device *pcie_device = NULL;
3449 struct scsi_target *starget = scmd->device->sdev_target;
3450 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3452 starget_printk(KERN_INFO, starget,
3453 "attempting target reset! scmd(0x%p)\n", scmd);
3454 _scsih_tm_display_info(ioc, scmd);
/* Target already gone: complete the command as NO_CONNECT. */
3456 sas_device_priv_data = scmd->device->hostdata;
3457 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3459 starget_printk(KERN_INFO, starget,
3460 "target been deleted! scmd(0x%p)\n", scmd);
3461 scmd->result = DID_NO_CONNECT << 16;
3462 scmd->scsi_done(scmd);
3467 /* for hidden raid components obtain the volume_handle */
3469 if (sas_device_priv_data->sas_target->flags &
3470 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3471 sas_device = mpt3sas_get_sdev_from_target(ioc,
3474 handle = sas_device->volume_handle;
3476 handle = sas_device_priv_data->sas_target->handle;
3479 scmd->result = DID_RESET << 16;
/* NVMe devices use a protocol-level reset method and the device's
 * own reset timeout; SAS uses a link reset.
 */
3484 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3486 if (pcie_device && (!ioc->tm_custom_handling) &&
3487 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3488 tr_timeout = pcie_device->reset_timeout;
3489 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3491 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3492 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3493 scmd->device->id, 0,
3494 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3495 tr_timeout, tr_method);
3496 /* Check for busy commands after reset */
3497 if (r == SUCCESS && atomic_read(&starget->target_busy))
3500 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3501 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3504 sas_device_put(sas_device);
3506 pcie_device_put(pcie_device);
3512 * scsih_host_reset - eh threads main host reset routine
3513 * @scmd: pointer to scsi command object
3515 * Return: SUCCESS if command aborted else FAILED
/* SCSI EH ->eh_host_reset_handler: last-resort full adapter reset via
 * the hard reset handler. Refused while the driver is still loading
 * or the host is being removed.
 */
3518 scsih_host_reset(struct scsi_cmnd *scmd)
3520 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3523 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3524 scsi_print_command(scmd);
3526 if (ioc->is_driver_loading || ioc->remove_host) {
3527 ioc_info(ioc, "Blocking the host reset\n");
3532 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3533 r = (retval < 0) ? FAILED : SUCCESS;
3535 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3536 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3542 * _scsih_fw_event_add - insert and queue up fw_event
3543 * @ioc: per adapter object
3544 * @fw_event: object describing the event
3545 * Context: This function will acquire ioc->fw_event_lock.
3547 * This adds the firmware event object into link list, then queues it up to
3548 * be processed from user context.
3551 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3553 unsigned long flags;
/* No worker thread means nowhere to process events — drop silently. */
3555 if (ioc->firmware_event_thread == NULL)
3558 spin_lock_irqsave(&ioc->fw_event_lock, flags);
/* Two refcounts are taken deliberately: one owned by the
 * fw_event_list entry, a second owned by the queued work item.
 * Each is dropped independently when its owner is done.
 */
3559 fw_event_work_get(fw_event);
3560 INIT_LIST_HEAD(&fw_event->list);
3561 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3562 INIT_WORK(&fw_event->work, _firmware_event_work);
3563 fw_event_work_get(fw_event);
3564 queue_work(ioc->firmware_event_thread, &fw_event->work);
3565 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3569 * _scsih_fw_event_del_from_list - delete fw_event from the list
3570 * @ioc: per adapter object
3571 * @fw_event: object describing the event
3572 * Context: This function will acquire ioc->fw_event_lock.
3574 * If the fw_event is on the fw_event_list, remove it and do a put.
3577 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3580 unsigned long flags;
3582 spin_lock_irqsave(&ioc->fw_event_lock, flags);
/* list_empty() on the entry itself tells us whether the list still
 * holds its reference (taken in _scsih_fw_event_add); only then do
 * the matching put.
 */
3583 if (!list_empty(&fw_event->list)) {
3584 list_del_init(&fw_event->list);
3585 fw_event_work_put(fw_event);
3587 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3592 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3593 * @ioc: per adapter object
3594 * @event_data: trigger event data
/* Wraps @event_data in a fw_event_work and hands it to the firmware
 * event worker. Ignored while the driver is still loading. The local
 * reference is dropped after queueing; the queue holds its own refs.
 */
3597 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3598 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3600 struct fw_event_work *fw_event;
3603 if (ioc->is_driver_loading)
3605 sz = sizeof(*event_data);
3606 fw_event = alloc_fw_event_work(sz);
3609 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3610 fw_event->ioc = ioc;
3611 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3612 _scsih_fw_event_add(ioc, fw_event);
3613 fw_event_work_put(fw_event);
3617 * _scsih_error_recovery_delete_devices - remove devices not responding
3618 * @ioc: per adapter object
3621 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3623 struct fw_event_work *fw_event;
/* Skip while loading: initial discovery handles stale devices itself. */
3625 if (ioc->is_driver_loading)
/* Zero-sized payload: this internal event carries no event_data. */
3627 fw_event = alloc_fw_event_work(0)
3630 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3631 fw_event->ioc = ioc;
3632 _scsih_fw_event_add(ioc, fw_event);
3633 fw_event_work_put(fw_event);
3637 * mpt3sas_port_enable_complete - port enable completed (fake event)
3638 * @ioc: per adapter object
3641 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3643 struct fw_event_work *fw_event;
/* Internal (driver-generated) event, no payload needed. */
3645 fw_event = alloc_fw_event_work(0);
3648 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3649 fw_event->ioc = ioc;
3650 _scsih_fw_event_add(ioc, fw_event);
3651 fw_event_work_put(fw_event);
/*
 * dequeue_next_fw_event - pop the oldest fw_event off ioc->fw_event_list.
 * Returns the event (still holding the list's reference, now owned by the
 * caller) or NULL if the list is empty.  Takes ioc->fw_event_lock.
 */
3654 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3656 unsigned long flags;
3657 struct fw_event_work *fw_event = NULL;
3659 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3660 if (!list_empty(&ioc->fw_event_list)) {
3661 fw_event = list_first_entry(&ioc->fw_event_list,
3662 struct fw_event_work, list);
/* list_del_init() so _scsih_fw_event_del_from_list() won't double-put. */
3663 list_del_init(&fw_event->list);
3665 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3671 * _scsih_fw_event_cleanup_queue - cleanup event queue
3672 * @ioc: per adapter object
3674 * Walk the firmware event queue, either killing timers, or waiting
3675 * for outstanding events to complete
3677 * Context: task, can sleep
3680 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3682 struct fw_event_work *fw_event;
/* Nothing to do if there are no queued or in-flight events, or no thread. */
3684 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3685 !ioc->firmware_event_thread)
/* Tell the worker path that cleanup is running. */
3688 ioc->fw_events_cleanup = 1;
/* Drain queued events first, then the one currently executing (if any). */
3689 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3690 (fw_event = ioc->current_event)) {
3692 * Wait on the fw_event to complete. If this returns 1, then
3693 * the event was never executed, and we need a put for the
3694 * reference the work had on the fw_event.
3696 * If it did execute, we wait for it to finish, and the put will
3697 * happen from _firmware_event_work()
3699 if (cancel_work_sync(&fw_event->work))
3700 fw_event_work_put(fw_event);
/* Drop the reference we took ownership of via dequeue/current_event. */
3702 fw_event_work_put(fw_event);
3704 ioc->fw_events_cleanup = 0;
3708 * _scsih_internal_device_block - block the sdev device
3709 * @sdev: per device object
3710 * @sas_device_priv_data : per device driver private data
3712 * make sure device is blocked without error, if not
3716 _scsih_internal_device_block(struct scsi_device *sdev,
3717 struct MPT3SAS_DEVICE *sas_device_priv_data)
3721 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3722 sas_device_priv_data->sas_target->handle);
/* Record driver-side blocked state before asking the midlayer to block. */
3723 sas_device_priv_data->block = 1;
/* _nowait variant: safe from non-sleeping (e.g. interrupt) context. */
3725 r = scsi_internal_device_block_nowait(sdev);
/* Failure is only logged; the device stays marked blocked in priv data. */
3727 sdev_printk(KERN_WARNING, sdev,
3728 "device_block failed with return(%d) for handle(0x%04x)\n",
3729 r, sas_device_priv_data->sas_target->handle);
3733 * _scsih_internal_device_unblock - unblock the sdev device
3734 * @sdev: per device object
3735 * @sas_device_priv_data : per device driver private data
3736 * make sure device is unblocked without error, if not retry
3737 * by blocking and then unblocking
3741 _scsih_internal_device_unblock(struct scsi_device *sdev,
3742 struct MPT3SAS_DEVICE *sas_device_priv_data)
3746 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3747 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3748 sas_device_priv_data->block = 0;
3749 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3751 /* The device has been set to SDEV_RUNNING by SD layer during
3752 * device addition but the request queue is still stopped by
3753 * our earlier block call. We need to perform a block again
3754 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3756 sdev_printk(KERN_WARNING, sdev,
3757 "device_unblock failed with return(%d) for handle(0x%04x) "
3758 "performing a block followed by an unblock\n",
3759 r, sas_device_priv_data->sas_target->handle);
/* Recovery: force SDEV_BLOCK first so a fresh unblock transition is legal. */
3760 sas_device_priv_data->block = 1;
3761 r = scsi_internal_device_block_nowait(sdev);
3763 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3764 "failed with return(%d) for handle(0x%04x)\n",
3765 r, sas_device_priv_data->sas_target->handle);
/* Second unblock attempt after the forced block; failure is logged only. */
3767 sas_device_priv_data->block = 0;
3768 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3770 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3771 " failed with return(%d) for handle(0x%04x)\n",
3772 r, sas_device_priv_data->sas_target->handle);
3777 * _scsih_ublock_io_all_device - unblock every device
3778 * @ioc: per adapter object
3780 * change the device state from block to running
3783 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3785 struct MPT3SAS_DEVICE *sas_device_priv_data;
3786 struct scsi_device *sdev;
3788 shost_for_each_device(sdev, ioc->shost) {
3789 sas_device_priv_data = sdev->hostdata;
/* Skip sdevs with no driver private data or that were never blocked. */
3790 if (!sas_device_priv_data)
3792 if (!sas_device_priv_data->block)
3795 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3796 "device_running, handle(0x%04x)\n",
3797 sas_device_priv_data->sas_target->handle));
3798 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3804 * _scsih_ublock_io_device - prepare device to be deleted
3805 * @ioc: per adapter object
3806 * @sas_address: sas address
3807 * @port: hba port entry
3809 * unblock then put device in offline state
3812 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3813 u64 sas_address, struct hba_port *port)
3815 struct MPT3SAS_DEVICE *sas_device_priv_data;
3816 struct scsi_device *sdev;
3818 shost_for_each_device(sdev, ioc->shost) {
3819 sas_device_priv_data = sdev->hostdata;
3820 if (!sas_device_priv_data)
/* Match on both SAS address and HBA port to pick the right target. */
3822 if (sas_device_priv_data->sas_target->sas_address
3825 if (sas_device_priv_data->sas_target->port != port)
/* Only unblock devices we previously blocked. */
3827 if (sas_device_priv_data->block)
3828 _scsih_internal_device_unblock(sdev,
3829 sas_device_priv_data);
3834 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3835 * @ioc: per adapter object
3837 * During device pull we need to appropriately set the sdev state.
3840 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3842 struct MPT3SAS_DEVICE *sas_device_priv_data;
3843 struct scsi_device *sdev;
3845 shost_for_each_device(sdev, ioc->shost) {
3846 sas_device_priv_data = sdev->hostdata;
3847 if (!sas_device_priv_data)
/* Already blocked: nothing to do for this sdev. */
3849 if (sas_device_priv_data->block)
/* SES (enclosure) devices are deliberately left unblocked. */
3851 if (sas_device_priv_data->ignore_delay_remove) {
3852 sdev_printk(KERN_INFO, sdev,
3853 "%s skip device_block for SES handle(0x%04x)\n",
3854 __func__, sas_device_priv_data->sas_target->handle);
3857 _scsih_internal_device_block(sdev, sas_device_priv_data);
3862 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3863 * @ioc: per adapter object
3864 * @handle: device handle
3866 * During device pull we need to appropriately set the sdev state.
3869 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3871 struct MPT3SAS_DEVICE *sas_device_priv_data;
3872 struct scsi_device *sdev;
3873 struct _sas_device *sas_device;
/* Takes a reference on sas_device; released via sas_device_put() below. */
3875 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3877 shost_for_each_device(sdev, ioc->shost) {
3878 sas_device_priv_data = sdev->hostdata;
3879 if (!sas_device_priv_data)
3881 if (sas_device_priv_data->sas_target->handle != handle)
3883 if (sas_device_priv_data->block)
/* Don't block while an rphy add for this device is still pending. */
3885 if (sas_device && sas_device->pend_sas_rphy_add)
3887 if (sas_device_priv_data->ignore_delay_remove) {
3888 sdev_printk(KERN_INFO, sdev,
3889 "%s skip device_block for SES handle(0x%04x)\n",
3890 __func__, sas_device_priv_data->sas_target->handle);
3893 _scsih_internal_device_block(sdev, sas_device_priv_data);
3897 sas_device_put(sas_device);
3901 * _scsih_block_io_to_children_attached_to_ex
3902 * @ioc: per adapter object
3903 * @sas_expander: the sas_device object
3905 * This routine set sdev state to SDEV_BLOCK for all devices
3906 * attached to this expander. This function called when expander is
3910 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3911 struct _sas_node *sas_expander)
3913 struct _sas_port *mpt3sas_port;
3914 struct _sas_device *sas_device;
3915 struct _sas_node *expander_sibling;
3916 unsigned long flags;
/* Pass 1: mark end devices on this expander's ports in blocking_handles. */
3921 list_for_each_entry(mpt3sas_port,
3922 &sas_expander->sas_port_list, port_list) {
3923 if (mpt3sas_port->remote_identify.device_type ==
3925 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3926 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3927 mpt3sas_port->remote_identify.sas_address,
3928 mpt3sas_port->hba_port);
/* Actual blocking is done later by the caller draining blocking_handles. */
3930 set_bit(sas_device->handle,
3931 ioc->blocking_handles);
3932 sas_device_put(sas_device);
3934 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* Pass 2: recurse into child expanders (edge or fanout). */
3938 list_for_each_entry(mpt3sas_port,
3939 &sas_expander->sas_port_list, port_list) {
3941 if (mpt3sas_port->remote_identify.device_type ==
3942 SAS_EDGE_EXPANDER_DEVICE ||
3943 mpt3sas_port->remote_identify.device_type ==
3944 SAS_FANOUT_EXPANDER_DEVICE) {
3946 mpt3sas_scsih_expander_find_by_sas_address(
3947 ioc, mpt3sas_port->remote_identify.sas_address,
3948 mpt3sas_port->hba_port);
3949 _scsih_block_io_to_children_attached_to_ex(ioc,
3956 * _scsih_block_io_to_children_attached_directly
3957 * @ioc: per adapter object
3958 * @event_data: topology change event data
3960 * This routine set sdev state to SDEV_BLOCK for all devices
3961 * direct attached during device pull.
3964 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3965 Mpi2EventDataSasTopologyChangeList_t *event_data)
3971 for (i = 0; i < event_data->NumEntries; i++) {
3972 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3975 reason_code = event_data->PHY[i].PhyStatus &
3976 MPI2_EVENT_SAS_TOPO_RC_MASK;
/* Only "delay not responding" entries are blocked here. */
3977 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3978 _scsih_block_io_device(ioc, handle);
3983 * _scsih_block_io_to_pcie_children_attached_directly
3984 * @ioc: per adapter object
3985 * @event_data: topology change event data
3987 * This routine set sdev state to SDEV_BLOCK for all devices
3988 * direct attached during device pull/reconnect.
3991 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3992 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3998 for (i = 0; i < event_data->NumEntries; i++) {
4000 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
/* PCIe variant of the SAS routine above: key off PortStatus instead. */
4003 reason_code = event_data->PortEntry[i].PortStatus;
4005 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4006 _scsih_block_io_device(ioc, handle);
4010 * _scsih_tm_tr_send - send task management request
4011 * @ioc: per adapter object
4012 * @handle: device handle
4013 * Context: interrupt time.
4015 * This code is to initiate the device removal handshake protocol
4016 * with controller firmware. This function will issue target reset
4017 * using high priority request queue. It will send a sas iounit
4018 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4020 * This is designed to send multiple task management requests at the same
4021 * time to the fifo. If the fifo is full, we will append the request,
4022 * and process it in a future completion.
4025 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4027 Mpi2SCSITaskManagementRequest_t *mpi_request;
4029 struct _sas_device *sas_device = NULL;
4030 struct _pcie_device *pcie_device = NULL;
4031 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4032 u64 sas_address = 0;
4033 unsigned long flags;
4034 struct _tr_list *delayed_tr;
4037 struct hba_port *port = NULL;
/* Bail out when a TM cannot be issued: PCI error recovery or IOC not OPERATIONAL. */
4039 if (ioc->pci_error_recovery) {
4041 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4045 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4046 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4048 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4053 /* if PD, then return */
4054 if (test_bit(handle, ioc->pd_handles))
4057 clear_bit(handle, ioc->pend_os_device_add);
/* Look up the handle as a SAS device first, then as an NVMe/PCIe device;
 * whichever matches gets its target marked deleted. */
4059 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4060 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4061 if (sas_device && sas_device->starget &&
4062 sas_device->starget->hostdata) {
4063 sas_target_priv_data = sas_device->starget->hostdata;
4064 sas_target_priv_data->deleted = 1;
4065 sas_address = sas_device->sas_address;
4066 port = sas_device->port;
4068 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4070 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4071 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4072 if (pcie_device && pcie_device->starget &&
4073 pcie_device->starget->hostdata) {
4074 sas_target_priv_data = pcie_device->starget->hostdata;
4075 sas_target_priv_data->deleted = 1;
4076 sas_address = pcie_device->wwid;
4078 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* NVMe devices (non-SCSI PCIe) need the protocol-level-reset TM method. */
4079 if (pcie_device && (!ioc->tm_custom_handling) &&
4080 (!(mpt3sas_scsih_is_pcie_scsi_device(
4081 pcie_device->device_info))))
4083 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4085 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4087 if (sas_target_priv_data) {
4089 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4090 handle, (u64)sas_address));
4092 if (sas_device->enclosure_handle != 0)
4094 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4095 (u64)sas_device->enclosure_logical_id,
4097 if (sas_device->connector_name[0] != '\0')
4099 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4100 sas_device->enclosure_level,
4101 sas_device->connector_name));
4102 } else if (pcie_device) {
4103 if (pcie_device->enclosure_handle != 0)
4105 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4106 (u64)pcie_device->enclosure_logical_id,
4107 pcie_device->slot));
4108 if (pcie_device->connector_name[0] != '\0')
4110 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4111 pcie_device->enclosure_level,
4112 pcie_device->connector_name));
/* Unblock any queued I/O so it fails fast, then invalidate the handle. */
4114 _scsih_ublock_io_device(ioc, sas_address, port);
4115 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
/* No free high-priority smid: park the request on delayed_tr_list; it is
 * re-issued from _scsih_check_for_pending_tm() on a later completion. */
4118 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4120 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4123 INIT_LIST_HEAD(&delayed_tr->list);
4124 delayed_tr->handle = handle;
4125 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4127 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4133 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4134 handle, smid, ioc->tm_tr_cb_idx));
4135 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4136 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4137 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4138 mpi_request->DevHandle = cpu_to_le16(handle);
4139 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4140 mpi_request->MsgFlags = tr_method;
4141 set_bit(handle, ioc->device_remove_in_progress);
4142 ioc->put_smid_hi_priority(ioc, smid, 0);
4143 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
/* Drop the lookup references taken above. */
4147 sas_device_put(sas_device);
4149 pcie_device_put(pcie_device);
4153 * _scsih_tm_tr_complete -
4154 * @ioc: per adapter object
4155 * @smid: system request message index
4156 * @msix_index: MSIX table index supplied by the OS
4157 * @reply: reply message frame(lower 32bit addr)
4158 * Context: interrupt time.
4160 * This is the target reset completion routine.
4161 * This code is part of the code to initiate the device removal
4162 * handshake protocol with controller firmware.
4163 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4165 * Return: 1 meaning mf should be freed from _base_interrupt
4166 * 0 means the mf is freed from this function.
4169 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4173 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4174 Mpi2SCSITaskManagementReply_t *mpi_reply =
4175 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4176 Mpi2SasIoUnitControlRequest_t *mpi_request;
4179 struct _sc_list *delayed_sc;
/* Abort the handshake if the host is recovering or the IOC left OPERATIONAL. */
4181 if (ioc->pci_error_recovery) {
4183 ioc_info(ioc, "%s: host in pci error recovery\n",
4187 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4188 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4190 ioc_info(ioc, "%s: host is not operational\n",
4194 if (unlikely(!mpi_reply)) {
4195 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4196 __FILE__, __LINE__, __func__);
/* Sanity check: reply must be for the same DevHandle as the request frame. */
4199 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4200 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4201 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4203 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4205 le16_to_cpu(mpi_reply->DevHandle), smid));
4209 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4211 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4212 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4213 le32_to_cpu(mpi_reply->IOCLogInfo),
4214 le32_to_cpu(mpi_reply->TerminationCount)));
/* Step 2 of the handshake: no free smid means defer the SAS IO unit
 * control request onto delayed_sc_list for a later completion. */
4216 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4217 if (!smid_sas_ctrl) {
4218 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4220 return _scsih_check_for_pending_tm(ioc, smid);
4221 INIT_LIST_HEAD(&delayed_sc->list);
4222 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4223 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4225 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4227 return _scsih_check_for_pending_tm(ioc, smid);
4231 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4232 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4233 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4234 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4235 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4236 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4237 mpi_request->DevHandle = mpi_request_tm->DevHandle;
4238 ioc->put_smid_default(ioc, smid_sas_ctrl);
/* Re-use this TM smid for any queued (delayed) target reset. */
4240 return _scsih_check_for_pending_tm(ioc, smid);
4243 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4244 * issue to IOC or not.
4245 * @ioc: per adapter object
4246 * @scmd: pointer to scsi command object
4248 * Returns true if scmd can be issued to IOC otherwise returns false.
4250 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4251 struct scsi_cmnd *scmd)
/* Never issue commands during PCI error recovery. */
4254 if (ioc->pci_error_recovery)
/* MPI2 (SAS2) controllers: block everything once host removal starts. */
4257 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4258 if (ioc->remove_host)
/* Later generations: during removal, only selected opcodes (e.g. cache
 * sync) are still allowed through, per the switch below. */
4264 if (ioc->remove_host) {
4266 switch (scmd->cmnd[0]) {
4267 case SYNCHRONIZE_CACHE:
4279 * _scsih_sas_control_complete - completion routine
4280 * @ioc: per adapter object
4281 * @smid: system request message index
4282 * @msix_index: MSIX table index supplied by the OS
4283 * @reply: reply message frame(lower 32bit addr)
4284 * Context: interrupt time.
4286 * This is the sas iounit control completion routine.
4287 * This code is part of the code to initiate the device removal
4288 * handshake protocol with controller firmware.
4290 * Return: 1 meaning mf should be freed from _base_interrupt
4291 * 0 means the mf is freed from this function.
4294 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4295 u8 msix_index, u32 reply)
4297 Mpi2SasIoUnitControlReply_t *mpi_reply =
4298 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4300 if (likely(mpi_reply)) {
4302 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4303 le16_to_cpu(mpi_reply->DevHandle), smid,
4304 le16_to_cpu(mpi_reply->IOCStatus),
4305 le32_to_cpu(mpi_reply->IOCLogInfo)));
/* REMOVE_DEVICE succeeded: the handle is no longer "remove in progress". */
4306 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4307 MPI2_IOCSTATUS_SUCCESS) {
4308 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4309 ioc->device_remove_in_progress);
4312 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4313 __FILE__, __LINE__, __func__);
/* Re-use this smid for any delayed internal commands (event acks / sc). */
4315 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4319 * _scsih_tm_tr_volume_send - send target reset request for volumes
4320 * @ioc: per adapter object
4321 * @handle: device handle
4322 * Context: interrupt time.
4324 * This is designed to send multiple task management requests at the same
4325 * time to the fifo. If the fifo is full, we will append the request,
4326 * and process it in a future completion.
4329 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4331 Mpi2SCSITaskManagementRequest_t *mpi_request;
4333 struct _tr_list *delayed_tr;
4335 if (ioc->pci_error_recovery) {
4337 ioc_info(ioc, "%s: host reset in progress!\n",
/* No free high-priority smid: queue on delayed_tr_volume_list and let a
 * later completion re-issue it via _scsih_check_for_pending_tm(). */
4342 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4344 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4347 INIT_LIST_HEAD(&delayed_tr->list);
4348 delayed_tr->handle = handle;
4349 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4351 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4357 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4358 handle, smid, ioc->tm_tr_volume_cb_idx));
4359 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4360 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4361 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4362 mpi_request->DevHandle = cpu_to_le16(handle);
4363 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4364 ioc->put_smid_hi_priority(ioc, smid, 0);
4368 * _scsih_tm_volume_tr_complete - target reset completion
4369 * @ioc: per adapter object
4370 * @smid: system request message index
4371 * @msix_index: MSIX table index supplied by the OS
4372 * @reply: reply message frame(lower 32bit addr)
4373 * Context: interrupt time.
4375 * Return: 1 meaning mf should be freed from _base_interrupt
4376 * 0 means the mf is freed from this function.
4379 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4380 u8 msix_index, u32 reply)
4383 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4384 Mpi2SCSITaskManagementReply_t *mpi_reply =
4385 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4387 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4389 ioc_info(ioc, "%s: host reset in progress!\n",
4393 if (unlikely(!mpi_reply)) {
4394 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4395 __FILE__, __LINE__, __func__);
/* Guard against spurious interrupts: request/reply DevHandle must match. */
4399 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4400 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4401 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4403 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4404 handle, le16_to_cpu(mpi_reply->DevHandle),
4410 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4411 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4412 le32_to_cpu(mpi_reply->IOCLogInfo),
4413 le32_to_cpu(mpi_reply->TerminationCount)));
/* Feed this freed smid to any queued (delayed) target reset. */
4415 return _scsih_check_for_pending_tm(ioc, smid);
4419 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4420 * @ioc: per adapter object
4421 * @smid: system request message index
4423 * @event_context: used to track events uniquely
4425 * Context - processed in interrupt context.
4428 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4431 Mpi2EventAckRequest_t *ack_request;
4432 int i = smid - ioc->internal_smid;
4433 unsigned long flags;
4435 /* Without releasing the smid just update the
4436 * call back index and reuse the same smid for
4437 * processing this delayed request
4439 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4440 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4441 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4444 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4445 le16_to_cpu(event), smid, ioc->base_cb_idx));
4446 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4447 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4448 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
/* event/event_context are already little-endian (U16), stored as-is. */
4449 ack_request->Event = event;
4450 ack_request->EventContext = event_context;
4451 ack_request->VF_ID = 0; /* TODO */
4452 ack_request->VP_ID = 0;
4453 ioc->put_smid_default(ioc, smid);
4457 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4458 * sas_io_unit_ctrl messages
4459 * @ioc: per adapter object
4460 * @smid: system request message index
4461 * @handle: device handle
4463 * Context - processed in interrupt context.
4466 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4467 u16 smid, u16 handle)
4469 Mpi2SasIoUnitControlRequest_t *mpi_request;
4471 int i = smid - ioc->internal_smid;
4472 unsigned long flags;
/* Skip if the host is being removed, in PCI error recovery, or the IOC
 * is no longer OPERATIONAL. */
4474 if (ioc->remove_host) {
4476 ioc_info(ioc, "%s: host has been removed\n",
4479 } else if (ioc->pci_error_recovery) {
4481 ioc_info(ioc, "%s: host in pci error recovery\n",
4485 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4486 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4488 ioc_info(ioc, "%s: host is not operational\n",
4493 /* Without releasing the smid just update the
4494 * call back index and reuse the same smid for
4495 * processing this delayed request
4497 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4498 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4499 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4502 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4503 handle, smid, ioc->tm_sas_control_cb_idx));
4504 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4505 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4506 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4507 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4508 mpi_request->DevHandle = cpu_to_le16(handle);
4509 ioc->put_smid_default(ioc, smid);
4513 * _scsih_check_for_pending_internal_cmds - check for pending internal messages
4514 * @ioc: per adapter object
4515 * @smid: system request message index
4517 * Context: Executed in interrupt context
4519 * This will check delayed internal messages list, and process the
4522 * Return: 1 meaning mf should be freed from _base_interrupt
4523 * 0 means the mf is freed from this function.
4526 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4528 struct _sc_list *delayed_sc;
4529 struct _event_ack_list *delayed_event_ack;
/* Delayed event ACKs take priority; the smid is reused (not freed). */
4531 if (!list_empty(&ioc->delayed_event_ack_list)) {
4532 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4533 struct _event_ack_list, list);
4534 _scsih_issue_delayed_event_ack(ioc, smid,
4535 delayed_event_ack->Event, delayed_event_ack->EventContext);
4536 list_del(&delayed_event_ack->list);
4537 kfree(delayed_event_ack);
/* Then any delayed SAS IO unit control (device remove) requests. */
4541 if (!list_empty(&ioc->delayed_sc_list)) {
4542 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4543 struct _sc_list, list);
4544 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4545 delayed_sc->handle);
4546 list_del(&delayed_sc->list);
4554 * _scsih_check_for_pending_tm - check for pending task management
4555 * @ioc: per adapter object
4556 * @smid: system request message index
4558 * This will check delayed target reset list, and feed the
4561 * Return: 1 meaning mf should be freed from _base_interrupt
4562 * 0 means the mf is freed from this function.
4565 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4567 struct _tr_list *delayed_tr;
/* Volume resets first: free this smid, then re-issue the queued TR
 * (the send path grabs its own high-priority smid). */
4569 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4570 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4571 struct _tr_list, list);
4572 mpt3sas_base_free_smid(ioc, smid);
4573 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4574 list_del(&delayed_tr->list);
/* Then any delayed device target resets. */
4579 if (!list_empty(&ioc->delayed_tr_list)) {
4580 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4581 struct _tr_list, list);
4582 mpt3sas_base_free_smid(ioc, smid);
4583 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4584 list_del(&delayed_tr->list);
4593 * _scsih_check_topo_delete_events - sanity check on topo events
4594 * @ioc: per adapter object
4595 * @event_data: the event data payload
4597 * This routine added to better handle cable breaker.
4599 * This handles the case where driver receives multiple expander
4600 * add and delete events in a single shot. When there is a delete event
4601 * the routine will void any pending add events waiting in the event queue.
4604 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4605 Mpi2EventDataSasTopologyChangeList_t *event_data)
4607 struct fw_event_work *fw_event;
4608 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4609 u16 expander_handle;
4610 struct _sas_node *sas_expander;
4611 unsigned long flags;
/* Kick off the device-removal handshake for every not-responding PHY. */
4615 for (i = 0 ; i < event_data->NumEntries; i++) {
4616 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4619 reason_code = event_data->PHY[i].PhyStatus &
4620 MPI2_EVENT_SAS_TOPO_RC_MASK;
4621 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4622 _scsih_tm_tr_send(ioc, handle);
/* Handle below num_phys means the parent is the HBA itself, not an expander. */
4625 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4626 if (expander_handle < ioc->sas_hba.num_phys) {
4627 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4630 if (event_data->ExpStatus ==
4631 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4632 /* put expander attached devices into blocking state */
4633 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4634 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4636 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4637 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* Drain blocking_handles (filled by the recursion above), blocking each. */
4639 handle = find_first_bit(ioc->blocking_handles,
4640 ioc->facts.MaxDevHandle);
4641 if (handle < ioc->facts.MaxDevHandle)
4642 _scsih_block_io_device(ioc, handle);
4643 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4644 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4645 _scsih_block_io_to_children_attached_directly(ioc, event_data);
/* Only a full expander delete voids queued add events below. */
4647 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4650 /* mark ignore flag for pending events */
4651 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4652 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4653 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4656 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4657 fw_event->event_data;
4658 if (local_event_data->ExpStatus ==
4659 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4660 local_event_data->ExpStatus ==
4661 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4662 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4665 ioc_info(ioc, "setting ignoring flag\n"));
4666 fw_event->ignore = 1;
4670 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4674 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4676 * @ioc: per adapter object
4677 * @event_data: the event data payload
4679 * This handles the case where driver receives multiple switch
4680 * or device add and delete events in a single shot. When there
4681 * is a delete event the routine will void any pending add
4682 * events waiting in the event queue.
4685 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4686 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4688 struct fw_event_work *fw_event;
4689 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4690 unsigned long flags;
4692 u16 handle, switch_handle;
/* Start device-removal handshake for every not-responding port entry. */
4694 for (i = 0; i < event_data->NumEntries; i++) {
4696 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4699 reason_code = event_data->PortEntry[i].PortStatus;
4700 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4701 _scsih_tm_tr_send(ioc, handle);
/* Zero switch handle: devices are attached directly to the controller. */
4704 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4705 if (!switch_handle) {
4706 _scsih_block_io_to_pcie_children_attached_directly(
4710 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4711 if ((event_data->SwitchStatus
4712 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4713 (event_data->SwitchStatus ==
4714 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4715 _scsih_block_io_to_pcie_children_attached_directly(
/* NOTE(review): SAS topo ES_* constants are compared against the PCIe
 * SwitchStatus here and below — presumably the numeric values match the
 * MPI26 SS_* equivalents; confirm against the MPI headers. */
4718 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4721 /* mark ignore flag for pending events */
4722 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4723 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4724 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4728 (Mpi26EventDataPCIeTopologyChangeList_t *)
4729 fw_event->event_data;
4730 if (local_event_data->SwitchStatus ==
4731 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4732 local_event_data->SwitchStatus ==
4733 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4734 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4737 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4738 fw_event->ignore = 1;
4742 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4746 * _scsih_set_volume_delete_flag - setting volume delete flag
4747 * @ioc: per adapter object
4748 * @handle: device handle
4750 * This returns nothing.
4753 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4755 struct _raid_device *raid_device;
4756 struct MPT3SAS_TARGET *sas_target_priv_data;
4757 unsigned long flags;
/* Look up the RAID volume by handle under raid_device_lock; if it has a
 * scsi_target with private data, mark the target deleted so subsequent
 * I/O to it is failed fast. */
4759 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4760 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4761 if (raid_device && raid_device->starget &&
4762 raid_device->starget->hostdata) {
4763 sas_target_priv_data =
4764 raid_device->starget->hostdata;
4765 sas_target_priv_data->deleted = 1;
4767 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4768 handle, (u64)raid_device->wwid));
4770 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4774 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4775 * @handle: input handle
4776 * @a: handle for volume a
4777 * @b: handle for volume b
4779 * IR firmware only supports two raid volumes. The purpose of this
4780 * routine is to set the volume handle in either a or b. When the given
4781 * input handle is non-zero, or when a and b have not been set before.
4784 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
/* Early out when the handle is zero (invalid) or already recorded in
 * either slot; otherwise the handle is stored into the first free slot. */
4786 if (!handle || handle == *a || handle == *b)
4795 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4796 * @ioc: per adapter object
4797 * @event_data: the event data payload
4798 * Context: interrupt time.
4800 * This routine will send target reset to volume, followed by target
4801 * resets to the PDs. This is called when a PD has been removed, or
4802 * volume has been deleted or removed. When the target reset is sent
4803 * to volume, the PD target resets need to be queued to start upon
4804 * completion of the volume target reset.
4807 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4808 Mpi2EventDataIrConfigChangeList_t *event_data)
4810 Mpi2EventIrConfigElement_t *element;
4812 u16 handle, volume_handle, a, b;
4813 struct _tr_list *delayed_tr;
/* WarpDrive hides IR functionality from the upper layers, so skip. */
4818 if (ioc->is_warpdrive)
4821 /* Volume Resets for Deleted or Removed */
4822 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4823 for (i = 0; i < event_data->NumElements; i++, element++) {
/* Foreign-config elements are not ours to reset. */
4824 if (le32_to_cpu(event_data->Flags) &
4825 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4827 if (element->ReasonCode ==
4828 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4829 element->ReasonCode ==
4830 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4831 volume_handle = le16_to_cpu(element->VolDevHandle);
4832 _scsih_set_volume_delete_flag(ioc, volume_handle);
4833 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4837 /* Volume Resets for UNHIDE events */
4838 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4839 for (i = 0; i < event_data->NumElements; i++, element++) {
4840 if (le32_to_cpu(event_data->Flags) &
4841 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4843 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4844 volume_handle = le16_to_cpu(element->VolDevHandle);
4845 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
/* Send target reset to (at most two) affected volumes first; IR firmware
 * supports only two volumes, tracked in 'a' and 'b'. */
4850 _scsih_tm_tr_volume_send(ioc, a);
4852 _scsih_tm_tr_volume_send(ioc, b);
4854 /* PD target resets */
4855 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4856 for (i = 0; i < event_data->NumElements; i++, element++) {
4857 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4859 handle = le16_to_cpu(element->PhysDiskDevHandle);
4860 volume_handle = le16_to_cpu(element->VolDevHandle);
/* The PD is no longer hidden behind the volume; drop it from the
 * pd_handles bitmap before resetting it. */
4861 clear_bit(handle, ioc->pd_handles);
4863 _scsih_tm_tr_send(ioc, handle);
4864 else if (volume_handle == a || volume_handle == b) {
/* A volume reset is outstanding for this PD's volume: queue the PD
 * target reset to run after the volume reset completes.
 * GFP_ATOMIC because this runs in interrupt context; BUG_ON on
 * allocation failure (no recovery path here). */
4865 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4866 BUG_ON(!delayed_tr);
4867 INIT_LIST_HEAD(&delayed_tr->list);
4868 delayed_tr->handle = handle;
4869 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4871 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4874 _scsih_tm_tr_send(ioc, handle);
4880 * _scsih_check_volume_delete_events - set delete flag for volumes
4881 * @ioc: per adapter object
4882 * @event_data: the event data payload
4883 * Context: interrupt time.
4885 * This will handle the case when the cable connected to entire volume is
4886 * pulled. We will take care of setting the deleted flag so normal IO will
4890 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4891 Mpi2EventDataIrVolume_t *event_data)
/* Only state-change events are of interest here. */
4895 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4897 state = le32_to_cpu(event_data->NewValue);
/* Volume went missing or failed: flag its target deleted so further
 * I/O is rejected instead of timing out. */
4898 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4899 MPI2_RAID_VOL_STATE_FAILED)
4900 _scsih_set_volume_delete_flag(ioc,
4901 le16_to_cpu(event_data->VolDevHandle));
4905 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4906 * @ioc: per adapter object
4907 * @event_data: the temp threshold event data
4908 * Context: interrupt time.
4911 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4912 Mpi2EventDataTemperature_t *event_data)
/* Only report sensors the IOC claims to have. Status bits 0-3 map to
 * the four threshold flags; exceeded thresholds print their index. */
4915 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4916 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4917 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4918 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4919 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4920 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4921 event_data->SensorNum);
4922 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4923 event_data->CurrentTemperature);
/* On MPI2.5+ parts also sample the doorbell: an overheating IOC may
 * have gone to FAULT or COREDUMP state, so surface that too. */
4924 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4925 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4926 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4927 MPI2_IOC_STATE_FAULT) {
4928 mpt3sas_print_fault_code(ioc,
4929 doorbell & MPI2_DOORBELL_DATA_MASK);
4930 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4931 MPI2_IOC_STATE_COREDUMP) {
4932 mpt3sas_print_coredump_info(ioc,
4933 doorbell & MPI2_DOORBELL_DATA_MASK);
/*
 * _scsih_set_satl_pending - track an in-flight ATA passthrough per device
 * @scmd: scsi command
 * @pending: true to claim the slot, false to release it
 *
 * Firmware SATL workaround: only one ATA_12/ATA_16 passthrough may be
 * outstanding per device. Non-ATA commands pass straight through. When
 * claiming, test_and_set_bit returns nonzero if another ATA command is
 * already pending (caller then returns busy); releasing clears the bit.
 */
4939 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4941 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4943 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4947 return test_and_set_bit(0, &priv->ata_command_pending);
4949 clear_bit(0, &priv->ata_command_pending);
4954 * _scsih_flush_running_cmds - completing outstanding commands.
4955 * @ioc: per adapter object
4957 * The flushing out of all pending scmd commands following host reset,
4958 * where all IO is dropped to the floor.
4961 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4963 struct scsi_cmnd *scmd;
4964 struct scsiio_tracker *st;
/* Walk every SCSI IO smid slot and complete whatever is still in flight. */
4968 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4969 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
/* Release the SATL slot, free the tracker and DMA mappings before
 * completing the command back to the midlayer. */
4973 _scsih_set_satl_pending(scmd, false);
4974 st = scsi_cmd_priv(scmd);
4975 mpt3sas_base_clear_st(ioc, st);
4976 scsi_dma_unmap(scmd);
/* During PCI error recovery / host removal the device is gone for
 * good (DID_NO_CONNECT); otherwise let the midlayer retry (DID_RESET). */
4977 if (ioc->pci_error_recovery || ioc->remove_host)
4978 scmd->result = DID_NO_CONNECT << 16;
4980 scmd->result = DID_RESET << 16;
4981 scmd->scsi_done(scmd);
4983 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4987 * _scsih_setup_eedp - setup MPI request for EEDP transfer
4988 * @ioc: per adapter object
4989 * @scmd: pointer to scsi command object
4990 * @mpi_request: pointer to the SCSI_IO request message frame
4992 * Supporting protection 1 and 3.
4995 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4996 Mpi25SCSIIORequest_t *mpi_request)
4999 unsigned char prot_op = scsi_get_prot_op(scmd);
5000 unsigned char prot_type = scsi_get_prot_type(scmd);
5001 Mpi25SCSIIORequest_t *mpi_request_3v =
5002 (Mpi25SCSIIORequest_t *)mpi_request;
/* No protection configured: leave the EEDP fields alone. */
5004 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
/* Map the midlayer protection operation onto the MPI EEDP opcode:
 * READ_STRIP -> check-and-remove, WRITE_INSERT -> insert. */
5007 if (prot_op == SCSI_PROT_READ_STRIP)
5008 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5009 else if (prot_op == SCSI_PROT_WRITE_INSERT)
5010 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5014 switch (prot_type) {
5015 case SCSI_PROT_DIF_TYPE1:
5016 case SCSI_PROT_DIF_TYPE2:
5019 * enable ref/guard checking
5020 * auto increment ref tag
5022 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
5023 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
5024 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
/* Type 1/2 carry a reference tag derived from the request's LBA.
 * NOTE(review): the tag is stored with cpu_to_be32 while most fields
 * here use cpu_to_le16/32 -- presumably the PI ref tag is defined
 * big-endian on the wire; confirm against the MPI spec. */
5025 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5026 cpu_to_be32(t10_pi_ref_tag(scmd->request));
5029 case SCSI_PROT_DIF_TYPE3:
5032 * enable guard checking
5034 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
/* Per-block size the hardware uses to locate the PI tuples. */
5039 mpi_request_3v->EEDPBlockSize =
5040 cpu_to_le16(scmd->device->sector_size);
5042 if (ioc->is_gen35_ioc)
5043 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5044 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5048 * _scsih_eedp_error_handling - return sense code for EEDP errors
5049 * @scmd: pointer to scsi command object
5050 * @ioc_status: ioc status
5053 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
/* Pick the ASCQ matching the kind of protection-information error
 * the IOC reported (guard / app tag / ref tag). */
5057 switch (ioc_status) {
5058 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5061 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5064 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
/* Build ILLEGAL REQUEST / ASC 0x10 sense (PI error family) and fail
 * the command with check condition + aborted. */
5071 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
5073 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
5074 SAM_STAT_CHECK_CONDITION;
5078 * scsih_qcmd - main scsi request entry point
5079 * @shost: SCSI host pointer
5080 * @scmd: pointer to scsi command object
5082 * The callback index is set inside `ioc->scsi_io_cb_idx`.
5084 * Return: 0 on success. If there's a failure, return either:
5085 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5086 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5089 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5091 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5092 struct MPT3SAS_DEVICE *sas_device_priv_data;
5093 struct MPT3SAS_TARGET *sas_target_priv_data;
5094 struct _raid_device *raid_device;
5095 struct request *rq = scmd->request;
5097 Mpi25SCSIIORequest_t *mpi_request;
5098 struct _pcie_device *pcie_device = NULL;
5103 if (ioc->logging_level & MPT_DEBUG_SCSI)
5104 scsi_print_command(scmd);
/* No per-device private data or no target: the device is gone. */
5106 sas_device_priv_data = scmd->device->hostdata;
5107 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5108 scmd->result = DID_NO_CONNECT << 16;
5109 scmd->scsi_done(scmd);
5113 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5114 scmd->result = DID_NO_CONNECT << 16;
5115 scmd->scsi_done(scmd);
5119 sas_target_priv_data = sas_device_priv_data->sas_target;
5121 /* invalid device handle */
5122 handle = sas_target_priv_data->handle;
5123 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5124 scmd->result = DID_NO_CONNECT << 16;
5125 scmd->scsi_done(scmd);
5130 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5131 /* host recovery or link resets sent via IOCTLs */
5132 return SCSI_MLQUEUE_HOST_BUSY;
5133 } else if (sas_target_priv_data->deleted) {
5134 /* device has been deleted */
5135 scmd->result = DID_NO_CONNECT << 16;
5136 scmd->scsi_done(scmd);
5138 } else if (sas_target_priv_data->tm_busy ||
5139 sas_device_priv_data->block) {
5140 /* device busy with task management */
5141 return SCSI_MLQUEUE_DEVICE_BUSY;
5145 * Bug work around for firmware SATL handling. The loop
5146 * is based on atomic operations and ensures consistency
5147 * since we're lockless at this point
5150 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5151 return SCSI_MLQUEUE_DEVICE_BUSY;
5152 } while (_scsih_set_satl_pending(scmd, true));
/* Map the data direction onto the MPI control field. */
5154 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5155 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5156 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5157 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5159 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5162 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5163 /* NCQ Prio supported, make sure control indicated high priority */
5164 if (sas_device_priv_data->ncq_prio_enable) {
5165 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5166 if (class == IOPRIO_CLASS_RT)
5167 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5169 /* Make sure Device is not raid volume.
5170 * We do not expose raid functionality to upper layer for warpdrive.
5172 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5173 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5174 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5175 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
/* Allocate a SCSI IO smid; on failure undo the SATL claim and let the
 * midlayer retry. */
5177 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5179 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5180 _scsih_set_satl_pending(scmd, false);
/* Build the MPI SCSI IO request frame. */
5183 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5184 memset(mpi_request, 0, ioc->request_sz);
5185 _scsih_setup_eedp(ioc, scmd, mpi_request);
/* 32-byte CDBs need the additional CDB length encoded in dwords. */
5187 if (scmd->cmd_len == 32)
5188 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5189 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
/* RAID components go through the RAID passthrough function instead. */
5190 if (sas_device_priv_data->sas_target->flags &
5191 MPT_TARGET_FLAGS_RAID_COMPONENT)
5192 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5194 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5195 mpi_request->DevHandle = cpu_to_le16(handle);
5196 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5197 mpi_request->Control = cpu_to_le32(mpi_control);
5198 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5199 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5200 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5201 mpi_request->SenseBufferLowAddress =
5202 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5203 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5204 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5206 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
/* Build the scatter-gather list; zero-length transfers get a
 * zero-length SGE. On SG build failure free the smid and back off. */
5208 if (mpi_request->DataLength) {
5209 pcie_device = sas_target_priv_data->pcie_dev;
5210 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5211 mpt3sas_base_free_smid(ioc, smid);
5212 _scsih_set_satl_pending(scmd, false);
5216 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5218 raid_device = sas_target_priv_data->raid_device;
5219 if (raid_device && raid_device->direct_io_enabled)
5220 mpt3sas_setup_direct_io(ioc, scmd,
5221 raid_device, mpi_request);
/* Dispatch: fast path when the target supports it, normal SCSI IO
 * otherwise, or the default queue for passthrough functions. */
5223 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5224 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5225 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5226 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5227 ioc->put_smid_fast_path(ioc, smid, handle);
5229 ioc->put_smid_scsi_io(ioc, smid,
5230 le16_to_cpu(mpi_request->DevHandle));
5232 ioc->put_smid_default(ioc, smid);
5236 return SCSI_MLQUEUE_HOST_BUSY;
5240 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5241 * @sense_buffer: sense data returned by target
5242 * @data: normalized skey/asc/ascq
5245 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
/* Response codes 0x72/0x73 indicate descriptor-format sense: key/ASC/ASCQ
 * live at bytes 1..3. Fixed format (0x70/0x71) keeps the key at byte 2
 * and ASC/ASCQ at bytes 12/13 (per SPC). */
5247 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5248 /* descriptor format */
5249 data->skey = sense_buffer[1] & 0x0F;
5250 data->asc = sense_buffer[2];
5251 data->ascq = sense_buffer[3];
5254 data->skey = sense_buffer[2] & 0x0F;
5255 data->asc = sense_buffer[12];
5256 data->ascq = sense_buffer[13];
5261 * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
5262 * @ioc: per adapter object
5263 * @scmd: pointer to scsi command object
5264 * @mpi_reply: reply mf payload returned from firmware
5267 * scsi_status - SCSI Status code returned from target device
5268 * scsi_state - state info associated with SCSI_IO determined by ioc
5269 * ioc_status - ioc supplied status info
5272 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5273 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5277 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5278 MPI2_IOCSTATUS_MASK;
5279 u8 scsi_state = mpi_reply->SCSIState;
5280 u8 scsi_status = mpi_reply->SCSIStatus;
5281 char *desc_ioc_state = NULL;
5282 char *desc_scsi_status = NULL;
/* scsi_state flags are accumulated as text in the per-ioc scratch
 * string (ioc->tmp_string). */
5283 char *desc_scsi_state = ioc->tmp_string;
5284 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5285 struct _sas_device *sas_device = NULL;
5286 struct _pcie_device *pcie_device = NULL;
5287 struct scsi_target *starget = scmd->device->sdev_target;
5288 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5289 char *device_str = NULL;
/* WarpDrive hides IR, so present volumes under a different label. */
5293 if (ioc->hide_ir_msg)
5294 device_str = "WarpDrive";
5296 device_str = "volume";
/* Loginfo 0x31170000 is filtered out (handled elsewhere / too noisy --
 * presumably; the reason is not visible here). */
5298 if (log_info == 0x31170000)
/* Translate IOCStatus to a human-readable description. */
5301 switch (ioc_status) {
5302 case MPI2_IOCSTATUS_SUCCESS:
5303 desc_ioc_state = "success";
5305 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5306 desc_ioc_state = "invalid function";
5308 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5309 desc_ioc_state = "scsi recovered error";
5311 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5312 desc_ioc_state = "scsi invalid dev handle";
5314 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5315 desc_ioc_state = "scsi device not there";
5317 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5318 desc_ioc_state = "scsi data overrun";
5320 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5321 desc_ioc_state = "scsi data underrun";
5323 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5324 desc_ioc_state = "scsi io data error";
5326 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5327 desc_ioc_state = "scsi protocol error";
5329 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5330 desc_ioc_state = "scsi task terminated";
5332 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5333 desc_ioc_state = "scsi residual mismatch";
5335 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5336 desc_ioc_state = "scsi task mgmt failed";
5338 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5339 desc_ioc_state = "scsi ioc terminated";
5341 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5342 desc_ioc_state = "scsi ext terminated";
5344 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5345 desc_ioc_state = "eedp guard error";
5347 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5348 desc_ioc_state = "eedp ref tag error";
5350 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5351 desc_ioc_state = "eedp app tag error";
5353 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5354 desc_ioc_state = "insufficient power";
5357 desc_ioc_state = "unknown";
/* Translate the target-supplied SCSI status byte. */
5361 switch (scsi_status) {
5362 case MPI2_SCSI_STATUS_GOOD:
5363 desc_scsi_status = "good";
5365 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5366 desc_scsi_status = "check condition";
5368 case MPI2_SCSI_STATUS_CONDITION_MET:
5369 desc_scsi_status = "condition met";
5371 case MPI2_SCSI_STATUS_BUSY:
5372 desc_scsi_status = "busy";
5374 case MPI2_SCSI_STATUS_INTERMEDIATE:
5375 desc_scsi_status = "intermediate";
5377 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5378 desc_scsi_status = "intermediate condmet";
5380 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5381 desc_scsi_status = "reservation conflict";
5383 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5384 desc_scsi_status = "command terminated";
5386 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5387 desc_scsi_status = "task set full";
5389 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5390 desc_scsi_status = "aca active";
5392 case MPI2_SCSI_STATUS_TASK_ABORTED:
5393 desc_scsi_status = "task aborted";
5396 desc_scsi_status = "unknown";
/* Build the scsi_state flag string by concatenating set flags. */
5400 desc_scsi_state[0] = '\0';
5402 desc_scsi_state = " ";
5403 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5404 strcat(desc_scsi_state, "response info ");
5405 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5406 strcat(desc_scsi_state, "state terminated ");
5407 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5408 strcat(desc_scsi_state, "no status ");
5409 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5410 strcat(desc_scsi_state, "autosense failed ");
5411 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5412 strcat(desc_scsi_state, "autosense valid ");
5414 scsi_print_command(scmd);
/* Identify the device: volume, NVMe (PCIe) device, or SAS device. */
5416 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5417 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5418 device_str, (u64)priv_target->sas_address);
5419 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5420 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5422 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5423 (u64)pcie_device->wwid, pcie_device->port_num);
5424 if (pcie_device->enclosure_handle != 0)
5425 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5426 (u64)pcie_device->enclosure_logical_id,
5428 if (pcie_device->connector_name[0])
5429 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5430 pcie_device->enclosure_level,
5431 pcie_device->connector_name);
5432 pcie_device_put(pcie_device);
5435 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5437 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5438 (u64)sas_device->sas_address, sas_device->phy);
5440 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5443 sas_device_put(sas_device);
/* Finally dump the decoded status, transfer counts and (if valid)
 * normalized sense / response info. */
5447 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5448 le16_to_cpu(mpi_reply->DevHandle),
5449 desc_ioc_state, ioc_status, smid);
5450 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5451 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5452 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5453 le16_to_cpu(mpi_reply->TaskTag),
5454 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5455 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5456 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5458 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5459 struct sense_info data;
5460 _scsih_normalize_sense(scmd->sense_buffer, &data);
5461 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5462 data.skey, data.asc, data.ascq,
5463 le32_to_cpu(mpi_reply->SenseCount));
5465 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5466 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5467 response_bytes = (u8 *)&response_info;
5468 _scsih_response_code(ioc, response_bytes[0]);
5473 * _scsih_turn_on_pfa_led - illuminate PFA LED
5474 * @ioc: per adapter object
5475 * @handle: device handle
5479 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5481 Mpi2SepReply_t mpi_reply;
5482 Mpi2SepRequest_t mpi_request;
5483 struct _sas_device *sas_device;
/* Takes a reference on the sas_device; released via sas_device_put()
 * at the end. */
5485 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
/* Ask the enclosure processor (SEP) to set the predicted-fault slot
 * status for this device, addressed by device handle. */
5489 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5490 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5491 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5492 mpi_request.SlotStatus =
5493 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5494 mpi_request.DevHandle = cpu_to_le16(handle);
5495 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5496 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5497 &mpi_request)) != 0) {
5498 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5499 __FILE__, __LINE__, __func__);
/* Remember the LED state so it can be turned off on device removal. */
5502 sas_device->pfa_led_on = 1;
5504 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5506 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5507 le16_to_cpu(mpi_reply.IOCStatus),
5508 le32_to_cpu(mpi_reply.IOCLogInfo)));
5512 sas_device_put(sas_device);
5516 * _scsih_turn_off_pfa_led - turn off Fault LED
5517 * @ioc: per adapter object
5518 * @sas_device: sas device whose PFA LED has to turned off
5522 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5523 struct _sas_device *sas_device)
5525 Mpi2SepReply_t mpi_reply;
5526 Mpi2SepRequest_t mpi_request;
/* Clear the slot status via the enclosure processor. Unlike the
 * turn-on path this addresses the slot by enclosure handle + slot
 * number (the device handle may already be invalid at removal time). */
5528 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5529 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5530 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5531 mpi_request.SlotStatus = 0;
5532 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5533 mpi_request.DevHandle = 0;
5534 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5535 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5536 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5537 &mpi_request)) != 0) {
5538 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5539 __FILE__, __LINE__, __func__);
5543 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5545 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5546 le16_to_cpu(mpi_reply.IOCStatus),
5547 le32_to_cpu(mpi_reply.IOCLogInfo)));
5553 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5554 * @ioc: per adapter object
5555 * @handle: device handle
5556 * Context: interrupt.
5559 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5561 struct fw_event_work *fw_event;
/* SEP requests sleep, so they cannot be issued from interrupt context;
 * queue an internal MPT3SAS_TURN_ON_PFA_LED firmware-event instead and
 * let the fw-event worker do the actual LED write. */
5563 fw_event = alloc_fw_event_work(0);
5566 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5567 fw_event->device_handle = handle;
5568 fw_event->ioc = ioc;
5569 _scsih_fw_event_add(ioc, fw_event);
/* _scsih_fw_event_add took its own reference; drop ours. */
5570 fw_event_work_put(fw_event);
5574 * _scsih_smart_predicted_fault - process smart errors
5575 * @ioc: per adapter object
5576 * @handle: device handle
5577 * Context: interrupt.
5580 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5582 struct scsi_target *starget;
5583 struct MPT3SAS_TARGET *sas_target_priv_data;
5584 Mpi2EventNotificationReply_t *event_reply;
5585 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5586 struct _sas_device *sas_device;
5588 unsigned long flags;
5590 /* only handle non-raid devices */
5591 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5592 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5596 starget = sas_device->starget;
5597 sas_target_priv_data = starget->hostdata;
/* RAID components and volumes are skipped: SMART handling applies only
 * to plain (non-IR) devices. */
5599 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5600 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5603 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5605 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* IBM-branded controllers light the predicted-fault LED. The SEP write
 * sleeps, so it is deferred through a queued firmware event. */
5607 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5608 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5610 /* insert into event log */
/* Synthesize a SAS device-status-change SMART-data event and push it to
 * the ctl event log so user space (mpt3sas ioctl consumers) sees it.
 * GFP_ATOMIC: interrupt context. */
5611 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5612 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5613 event_reply = kzalloc(sz, GFP_ATOMIC);
5615 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5616 __FILE__, __LINE__, __func__);
5620 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5621 event_reply->Event =
5622 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5623 event_reply->MsgLength = sz/4;
5624 event_reply->EventDataLength =
5625 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5626 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5627 event_reply->EventData;
5628 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
/* ASC 0x5D = "failure prediction threshold exceeded" (SPC). */
5629 event_data->ASC = 0x5D;
5630 event_data->DevHandle = cpu_to_le16(handle);
5631 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5632 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5636 sas_device_put(sas_device);
5640 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5645 * _scsih_io_done - scsi request callback
5646 * @ioc: per adapter object
5647 * @smid: system request message index
5648 * @msix_index: MSIX table index supplied by the OS
5649 * @reply: reply message frame(lower 32bit addr)
5651 * Callback handler when using _scsih_qcmd.
5653 * Return: 1 meaning mf should be freed from _base_interrupt
5654 * 0 means the mf is freed from this function.
5657 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5659 Mpi25SCSIIORequest_t *mpi_request;
5660 Mpi2SCSIIOReply_t *mpi_reply;
5661 struct scsi_cmnd *scmd;
5662 struct scsiio_tracker *st;
5668 struct MPT3SAS_DEVICE *sas_device_priv_data;
5669 u32 response_code = 0;
5671 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5673 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5677 _scsih_set_satl_pending(scmd, false);
5679 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5681 if (mpi_reply == NULL) {
5682 scmd->result = DID_OK << 16;
5686 sas_device_priv_data = scmd->device->hostdata;
5687 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5688 sas_device_priv_data->sas_target->deleted) {
5689 scmd->result = DID_NO_CONNECT << 16;
5692 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5695 * WARPDRIVE: If direct_io is set then it is directIO,
5696 * the failed direct I/O should be redirected to volume
5698 st = scsi_cmd_priv(scmd);
5699 if (st->direct_io &&
5700 ((ioc_status & MPI2_IOCSTATUS_MASK)
5701 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5704 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5705 mpi_request->DevHandle =
5706 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5707 ioc->put_smid_scsi_io(ioc, smid,
5708 sas_device_priv_data->sas_target->handle);
5711 /* turning off TLR */
5712 scsi_state = mpi_reply->SCSIState;
5713 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5715 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5716 if (!sas_device_priv_data->tlr_snoop_check) {
5717 sas_device_priv_data->tlr_snoop_check++;
5718 if ((!ioc->is_warpdrive &&
5719 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5720 !scsih_is_nvme(&scmd->device->sdev_gendev))
5721 && sas_is_tlr_enabled(scmd->device) &&
5722 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5723 sas_disable_tlr(scmd->device);
5724 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5728 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5729 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5730 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5731 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5734 ioc_status &= MPI2_IOCSTATUS_MASK;
5735 scsi_status = mpi_reply->SCSIStatus;
5737 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5738 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5739 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5740 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5741 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5744 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5745 struct sense_info data;
5746 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5748 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5749 le32_to_cpu(mpi_reply->SenseCount));
5750 memcpy(scmd->sense_buffer, sense_data, sz);
5751 _scsih_normalize_sense(scmd->sense_buffer, &data);
5752 /* failure prediction threshold exceeded */
5753 if (data.asc == 0x5D)
5754 _scsih_smart_predicted_fault(ioc,
5755 le16_to_cpu(mpi_reply->DevHandle));
5756 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5758 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5759 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5760 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5761 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5762 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5764 switch (ioc_status) {
5765 case MPI2_IOCSTATUS_BUSY:
5766 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5767 scmd->result = SAM_STAT_BUSY;
5770 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5771 scmd->result = DID_NO_CONNECT << 16;
5774 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5775 if (sas_device_priv_data->block) {
5776 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5779 if (log_info == 0x31110630) {
5780 if (scmd->retries > 2) {
5781 scmd->result = DID_NO_CONNECT << 16;
5782 scsi_device_set_state(scmd->device,
5785 scmd->result = DID_SOFT_ERROR << 16;
5786 scmd->device->expecting_cc_ua = 1;
5789 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5790 scmd->result = DID_RESET << 16;
5792 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5793 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5794 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5795 scmd->result = DID_RESET << 16;
5798 scmd->result = DID_SOFT_ERROR << 16;
5800 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5801 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5802 scmd->result = DID_RESET << 16;
5805 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5806 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5807 scmd->result = DID_SOFT_ERROR << 16;
5809 scmd->result = (DID_OK << 16) | scsi_status;
5812 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5813 scmd->result = (DID_OK << 16) | scsi_status;
5815 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5818 if (xfer_cnt < scmd->underflow) {
5819 if (scsi_status == SAM_STAT_BUSY)
5820 scmd->result = SAM_STAT_BUSY;
5822 scmd->result = DID_SOFT_ERROR << 16;
5823 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5824 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5825 scmd->result = DID_SOFT_ERROR << 16;
5826 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5827 scmd->result = DID_RESET << 16;
5828 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5829 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5830 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5831 scmd->result = (DRIVER_SENSE << 24) |
5832 SAM_STAT_CHECK_CONDITION;
5833 scmd->sense_buffer[0] = 0x70;
5834 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5835 scmd->sense_buffer[12] = 0x20;
5836 scmd->sense_buffer[13] = 0;
5840 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5841 scsi_set_resid(scmd, 0);
5843 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5844 case MPI2_IOCSTATUS_SUCCESS:
5845 scmd->result = (DID_OK << 16) | scsi_status;
5846 if (response_code ==
5847 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5848 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5849 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5850 scmd->result = DID_SOFT_ERROR << 16;
5851 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5852 scmd->result = DID_RESET << 16;
5855 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5856 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5857 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5858 _scsih_eedp_error_handling(scmd, ioc_status);
5861 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5862 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5863 case MPI2_IOCSTATUS_INVALID_SGL:
5864 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5865 case MPI2_IOCSTATUS_INVALID_FIELD:
5866 case MPI2_IOCSTATUS_INVALID_STATE:
5867 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5868 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5869 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5871 scmd->result = DID_SOFT_ERROR << 16;
5876 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5877 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5881 scsi_dma_unmap(scmd);
5882 mpt3sas_base_free_smid(ioc, smid);
5883 scmd->scsi_done(scmd);
5888 * _scsih_update_vphys_after_reset - update the Port's
5889 * vphys_list after reset
5890 * @ioc: per adapter object
/*
 * NOTE(review): excerpt view — several original source lines (braces,
 * continue statements, and some local declarations such as 'i', 'sz',
 * 'ioc_status') fall outside this chunk; read alongside the full file.
 */
5895 _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5899 Mpi2ConfigReply_t mpi_reply;
5900 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5901 u16 attached_handle;
5902 u64 attached_sas_addr;
5903 u8 found = 0, port_id;
5904 Mpi2SasPhyPage0_t phy_pg0;
5905 struct hba_port *port, *port_next, *mport;
5906 struct virtual_phy *vphy, *vphy_next;
5907 struct _sas_device *sas_device;
/* Phase 1: flag every virtual_phy on every port as dirty; entries that
 * are re-discovered below get the flag cleared again. */
5910 * Mark all the vphys objects as dirty.
5912 list_for_each_entry_safe(port, port_next,
5913 &ioc->port_table_list, list) {
5914 if (!port->vphys_mask)
5916 list_for_each_entry_safe(vphy, vphy_next,
5917 &port->vphys_list, list) {
5918 vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
/* Phase 2: re-read the post-reset phy topology from the firmware. */
5923 * Read SASIOUnitPage0 to get each HBA Phy's data.
5925 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
5926 (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
5927 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5928 if (!sas_iounit_pg0) {
5929 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5930 __FILE__, __LINE__, __func__);
5933 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5934 sas_iounit_pg0, sz)) != 0)
5936 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5937 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5940 * Loop over each HBA Phy.
5942 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5944 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
5946 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5947 MPI2_SAS_NEG_LINK_RATE_1_5)
5950 * Check whether Phy is connected to SEP device or not,
5951 * if it is SEP device then read the Phy's SASPHYPage0 data to
5952 * determine whether Phy is a virtual Phy or not. if it is
5953 * virtual phy then it is confirmed that the attached remote
5954 * device is a HBA's vSES device.
5957 sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
5958 MPI2_SAS_DEVICE_INFO_SEP))
5961 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5963 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5964 __FILE__, __LINE__, __func__);
5968 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
5969 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
5972 * Get the vSES device's SAS Address.
5974 attached_handle = le16_to_cpu(
5975 sas_iounit_pg0->PhyData[i].AttachedDevHandle);
5976 if (_scsih_get_sas_address(ioc, attached_handle,
5977 &attached_sas_addr) != 0) {
5978 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5979 __FILE__, __LINE__, __func__);
/* Phase 3: locate the dirty vphy matching this vSES SAS address and
 * rebind it to the (possibly changed) post-reset port. */
5984 port = port_next = NULL;
5986 * Loop over each virtual_phy object from
5987 * each port's vphys_list.
5989 list_for_each_entry_safe(port,
5990 port_next, &ioc->port_table_list, list) {
5991 if (!port->vphys_mask)
5993 list_for_each_entry_safe(vphy, vphy_next,
5994 &port->vphys_list, list) {
5996 * Continue with next virtual_phy object
5997 * if the object is not marked as dirty.
5999 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6003 * Continue with next virtual_phy object
6004 * if the object's SAS Address is not equal
6005 * to current Phy's vSES device SAS Address.
6007 if (vphy->sas_address != attached_sas_addr)
6010 * Enable current Phy number bit in object's
6013 if (!(vphy->phy_mask & (1 << i)))
6014 vphy->phy_mask = (1 << i);
6016 * Get hba_port object from hba_port table
6017 * corresponding to current phy's Port ID.
6018 * if there is no hba_port object corresponding
6019 * to Phy's Port ID then create a new hba_port
6020 * object & add to hba_port table.
6022 port_id = sas_iounit_pg0->PhyData[i].Port;
6023 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6026 sizeof(struct hba_port), GFP_KERNEL);
6029 mport->port_id = port_id;
6031 "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6032 __func__, mport, mport->port_id);
6033 list_add_tail(&mport->list,
6034 &ioc->port_table_list);
6037 * If mport & port pointers are not pointing to
6038 * same hba_port object then it means that vSES
6039 * device's Port ID got changed after reset and
6040 * hence move current virtual_phy object from
6041 * port's vphys_list to mport's vphys_list.
6043 if (port != mport) {
6044 if (!mport->vphys_mask)
6046 &mport->vphys_list);
6047 mport->vphys_mask |= (1 << i);
6048 port->vphys_mask &= ~(1 << i);
6049 list_move(&vphy->list,
6050 &mport->vphys_list);
6051 sas_device = mpt3sas_get_sdev_by_addr(
6052 ioc, attached_sas_addr, port);
6054 sas_device->port = mport;
6057 * Earlier while updating the hba_port table,
6058 * it is determined that there is no other
6059 * direct attached device with mport's Port ID,
6060 * Hence mport was marked as dirty. Only vSES
6061 * device has this Port ID, so unmark the mport
6064 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6065 mport->sas_address = 0;
6066 mport->phy_mask = 0;
6068 ~HBA_PORT_FLAG_DIRTY_PORT;
6071 * Unmark current virtual_phy object as dirty.
6073 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6082 kfree(sas_iounit_pg0);
6086 * _scsih_get_port_table_after_reset - Construct temporary port table
6087 * @ioc: per adapter object
6088 * @port_table: address where port table needs to be constructed
6090 * return number of HBA port entries available after reset.
/*
 * Build a temporary hba_port table (port id, phy mask, attached SAS
 * address) from SAS IO Unit Page 0 after a host reset.
 * NOTE(review): excerpt view — some lines (braces, continue/return
 * statements, declarations of 'i', 'j', 'sz', 'ioc_status') are outside
 * this chunk.
 */
6093 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6094 struct hba_port *port_table)
6098 Mpi2ConfigReply_t mpi_reply;
6099 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6100 u16 attached_handle;
6101 u64 attached_sas_addr;
6102 u8 found = 0, port_count = 0, port_id;
6104 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6105 * sizeof(Mpi2SasIOUnit0PhyData_t));
6106 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6107 if (!sas_iounit_pg0) {
6108 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6109 __FILE__, __LINE__, __func__);
6113 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6114 sas_iounit_pg0, sz)) != 0)
6116 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6117 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6119 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
/* Skip phys that did not negotiate at least a 1.5G link. */
6121 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6122 MPI2_SAS_NEG_LINK_RATE_1_5)
6125 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6126 if (_scsih_get_sas_address(
6127 ioc, attached_handle, &attached_sas_addr) != 0) {
6128 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6129 __FILE__, __LINE__, __func__);
/* If an entry with the same port id and attached SAS address already
 * exists, just OR this phy into its phy_mask. */
6133 for (j = 0; j < port_count; j++) {
6134 port_id = sas_iounit_pg0->PhyData[i].Port;
6135 if (port_table[j].port_id == port_id &&
6136 port_table[j].sas_address == attached_sas_addr) {
6137 port_table[j].phy_mask |= (1 << i);
/* Otherwise start a new port_table entry for this phy. */
6146 port_id = sas_iounit_pg0->PhyData[i].Port;
6147 port_table[port_count].port_id = port_id;
6148 port_table[port_count].phy_mask = (1 << i);
6149 port_table[port_count].sas_address = attached_sas_addr;
6153 kfree(sas_iounit_pg0);
/*
 * Match-quality codes returned by _scsih_look_and_get_matched_port_entry(),
 * from strongest (address + full phy mask) to weaker partial matches.
 * NOTE(review): additional enumerators and the closing brace of this enum
 * lie outside this excerpt.
 */
6157 enum hba_port_matched_codes {
6159 MATCHED_WITH_ADDR_AND_PHYMASK,
6160 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6161 MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6166 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6167 * from HBA port table
6168 * @ioc: per adapter object
6169 * @port_entry - hba port entry from temporary port table which needs to be
6170 * searched for matched entry in the HBA port table
6171 * @matched_port_entry - save matched hba port entry here
6172 * @count - count of matched entries
6174 * return type of matched entry found.
/*
 * Search the adapter's dirty hba_port entries for the best match against
 * @port_entry (a post-reset temporary entry), preferring, in order:
 * exact addr+phymask, addr+sub-phymask+port id, addr+sub-phymask, then
 * addr only.  The best candidate is returned via @matched_port_entry.
 * NOTE(review): excerpt view — break/continue statements and some lines
 * of the weaker-match conditions are outside this chunk.
 */
6176 static enum hba_port_matched_codes
6177 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6178 struct hba_port *port_entry,
6179 struct hba_port **matched_port_entry, int *count)
6181 struct hba_port *port_table_entry, *matched_port = NULL;
6182 enum hba_port_matched_codes matched_code = NOT_MATCHED;
6184 *matched_port_entry = NULL;
6186 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
/* Only dirty (not yet re-claimed) entries are candidates. */
6187 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
/* Strongest match: same SAS address and identical phy mask. */
6190 if ((port_table_entry->sas_address == port_entry->sas_address)
6191 && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6192 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6193 matched_port = port_table_entry;
/* Same address, overlapping phy mask, and same port id. */
6197 if ((port_table_entry->sas_address == port_entry->sas_address)
6198 && (port_table_entry->phy_mask & port_entry->phy_mask)
6199 && (port_table_entry->port_id == port_entry->port_id)) {
6200 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6201 matched_port = port_table_entry;
/* Same address with overlapping phy mask only; do not downgrade a
 * previously found stronger match. */
6205 if ((port_table_entry->sas_address == port_entry->sas_address)
6206 && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6208 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6210 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6211 matched_port = port_table_entry;
/* Weakest: address-only match. */
6215 if (port_table_entry->sas_address == port_entry->sas_address) {
6217 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6219 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6221 matched_code = MATCHED_WITH_ADDR;
6222 matched_port = port_table_entry;
6227 *matched_port_entry = matched_port;
6228 if (matched_code == MATCHED_WITH_ADDR)
6230 return matched_code;
6234 * _scsih_del_phy_part_of_anther_port - remove phy if it
6235 * is a part of another port
6236 *@ioc: per adapter object
6237 *@port_table: port table after reset
6238 *@index: hba port entry index
6239 *@port_count: number of ports available after host reset
6240 *@offset: HBA phy bit offset
/*
 * If HBA phy @offset currently belongs to a different port entry in
 * @port_table, detach it from that sas_node port via the transport layer;
 * in all cases record the phy as owned by @port_table[index].
 * NOTE(review): excerpt view — the loop's skip-self check and some braces
 * are outside this chunk.
 */
6244 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6245 struct hba_port *port_table,
6246 int index, u8 port_count, int offset)
6248 struct _sas_node *sas_node = &ioc->sas_hba;
6251 for (i = 0; i < port_count; i++) {
6255 if (port_table[i].phy_mask & (1 << offset)) {
6256 mpt3sas_transport_del_phy_from_an_existing_port(
6257 ioc, sas_node, &sas_node->phy[offset]);
6263 port_table[index].phy_mask |= (1 << offset);
6267 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6269 *@ioc: per adapter object
6270 *@hba_port_entry: hba port table entry
6271 *@port_table: temporary port table
6272 *@index: hba port entry index
6273 *@port_count: number of ports available after host reset
/*
 * Reconcile @hba_port_entry's phy membership with the post-reset
 * @port_table[index] entry: XOR of the two phy masks yields the phys that
 * changed sides; each such phy is either reclaimed from another port or
 * re-added to this port through the SAS transport layer.
 * NOTE(review): excerpt view — some braces and trailing arguments are
 * outside this chunk.
 */
6277 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6278 struct hba_port *hba_port_entry, struct hba_port *port_table,
6279 int index, int port_count)
6281 u32 phy_mask, offset = 0;
6282 struct _sas_node *sas_node = &ioc->sas_hba;
/* Bits set in phy_mask are phys whose port membership differs. */
6284 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6286 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6287 if (phy_mask & (1 << offset)) {
6288 if (!(port_table[index].phy_mask & (1 << offset))) {
6289 _scsih_del_phy_part_of_anther_port(
6290 ioc, port_table, index, port_count,
/* Phy newly belongs to this port: drop any stale membership first,
 * then add it to the existing transport port. */
6294 if (sas_node->phy[offset].phy_belongs_to_port)
6295 mpt3sas_transport_del_phy_from_an_existing_port(
6296 ioc, sas_node, &sas_node->phy[offset]);
6297 mpt3sas_transport_add_phy_to_an_existing_port(
6298 ioc, sas_node, &sas_node->phy[offset],
6299 hba_port_entry->sas_address,
6306 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6307 * @ioc: per adapter object
/*
 * Free every virtual_phy still flagged MPT_VPHY_FLAG_DIRTY_PHY (i.e. not
 * re-discovered after reset); a port left with no vphys and no SAS
 * address is itself marked dirty for later removal.
 * NOTE(review): excerpt view — continue statements, kfree of the vphy and
 * closing braces are outside this chunk.
 */
6312 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6314 struct hba_port *port, *port_next;
6315 struct virtual_phy *vphy, *vphy_next;
6317 list_for_each_entry_safe(port, port_next,
6318 &ioc->port_table_list, list) {
6319 if (!port->vphys_mask)
6321 list_for_each_entry_safe(vphy, vphy_next,
6322 &port->vphys_list, list) {
6323 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6324 drsprintk(ioc, ioc_info(ioc,
6325 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6326 vphy, port->port_id,
6328 port->vphys_mask &= ~vphy->phy_mask;
6329 list_del(&vphy->list);
6333 if (!port->vphys_mask && !port->sas_address)
6334 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6339 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6341 *@ioc: per adapter object
/*
 * Remove hba_port entries still flagged dirty after reset, except ports
 * flagged NEW (created during the current recovery).
 * NOTE(review): excerpt view — the continue statement, kfree of the port
 * and closing braces are outside this chunk.
 */
6345 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6347 struct hba_port *port, *port_next;
6349 list_for_each_entry_safe(port, port_next,
6350 &ioc->port_table_list, list) {
6351 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6352 port->flags & HBA_PORT_FLAG_NEW_PORT)
6355 drsprintk(ioc, ioc_info(ioc,
6356 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6357 port, port->port_id, port->phy_mask));
6358 list_del(&port->list);
6364 * _scsih_sas_port_refresh - Update HBA port table after host reset
6365 * @ioc: per adapter object
/*
 * Rebuild the HBA's port table after a host reset: snapshot the new
 * topology, mark all existing entries dirty, then for each new entry find
 * the best pre-reset match and update (or keep dirty) accordingly.
 * NOTE(review): excerpt view — declarations of 'port_count', 'ret',
 * 'sas_addr', several break/continue statements, switch/brace lines and
 * the trailing kfree are outside this chunk.
 */
6368 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6371 struct hba_port *port_table;
6372 struct hba_port *port_table_entry;
6373 struct hba_port *port_entry = NULL;
6374 int i, j, count = 0, lcount = 0;
6378 drsprintk(ioc, ioc_info(ioc,
6379 "updating ports for sas_host(0x%016llx)\n",
6380 (unsigned long long)ioc->sas_hba.sas_address));
6382 port_table = kcalloc(ioc->sas_hba.num_phys,
6383 sizeof(struct hba_port), GFP_KERNEL);
6387 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6391 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6392 for (j = 0; j < port_count; j++)
6393 drsprintk(ioc, ioc_info(ioc,
6394 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6395 port_table[j].port_id,
6396 port_table[j].phy_mask, port_table[j].sas_address));
/* Everything in the existing table starts out dirty; matched entries
 * are un-dirtied below. */
6398 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6399 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6401 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6402 port_table_entry = NULL;
6403 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6404 drsprintk(ioc, ioc_info(ioc,
6405 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6406 port_table_entry->port_id,
6407 port_table_entry->phy_mask,
6408 port_table_entry->sas_address));
6411 for (j = 0; j < port_count; j++) {
6412 ret = _scsih_look_and_get_matched_port_entry(ioc,
6413 &port_table[j], &port_entry, &count);
6415 drsprintk(ioc, ioc_info(ioc,
6416 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6417 port_table[j].sas_address,
6418 port_table[j].port_id));
6423 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6424 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6425 _scsih_add_or_del_phys_from_existing_port(ioc,
6426 port_entry, port_table, j, port_count);
6428 case MATCHED_WITH_ADDR:
6429 sas_addr = port_table[j].sas_address;
/* lcount counts how many new entries share this SAS address; an
 * ambiguous (multi-way) address-only match is not acted on. */
6430 for (i = 0; i < port_count; i++) {
6431 if (port_table[i].sas_address == sas_addr)
6435 if (count > 1 || lcount > 1)
6438 _scsih_add_or_del_phys_from_existing_port(ioc,
6439 port_entry, port_table, j, port_count);
/* Adopt the new port id/phy mask and clear the dirty flag on the
 * matched pre-reset entry. */
6445 if (port_entry->port_id != port_table[j].port_id)
6446 port_entry->port_id = port_table[j].port_id;
6447 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6448 port_entry->phy_mask = port_table[j].phy_mask;
6451 port_table_entry = NULL;
6455 * _scsih_alloc_vphy - allocate virtual_phy object
6456 * @ioc: per adapter object
6457 * @port_id: Port ID number
6458 * @phy_num: HBA Phy number
6460 * Returns allocated virtual_phy object.
/*
 * Look up (or allocate and enlist) the virtual_phy object for @phy_num on
 * the hba_port identified by @port_id.  Returns the vphy, or NULL on
 * lookup/allocation failure (per the visible NULL-returning paths; some
 * lines of this function are outside this excerpt).
 */
6462 static struct virtual_phy *
6463 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6465 struct virtual_phy *vphy;
6466 struct hba_port *port;
6468 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
/* Reuse an existing vphy if one is already registered for this phy. */
6472 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6474 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6479 * Enable bit corresponding to HBA phy number on its
6480 * parent hba_port object's vphys_mask field.
6482 port->vphys_mask |= (1 << phy_num);
6483 vphy->phy_mask |= (1 << phy_num);
6485 INIT_LIST_HEAD(&port->vphys_list);
6486 list_add_tail(&vphy->list, &port->vphys_list);
6489 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6490 vphy, port->port_id, phy_num);
6496 * _scsih_sas_host_refresh - refreshing sas host object contents
6497 * @ioc: per adapter object
6500 * During port enable, fw will send topology events for every device. It is
6501 * possible that the handles may change from the previous setting, so this
6502 * code keeps the handles updated when they change.
/*
 * Refresh ioc->sas_hba from SAS IO Unit Page 0: update the controller
 * device handle, create hba_port entries for newly seen port ids,
 * allocate vphys for hot-added vSES devices, and push per-phy link state
 * to the SAS transport layer.
 * NOTE(review): excerpt view — declarations of 'i', 'sz', 'ioc_status',
 * several braces/continue lines and error-path gotos are outside this
 * chunk.
 */
6505 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6510 Mpi2ConfigReply_t mpi_reply;
6511 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6512 u16 attached_handle;
6513 u8 link_rate, port_id;
6514 struct hba_port *port;
6515 Mpi2SasPhyPage0_t phy_pg0;
6518 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6519 (u64)ioc->sas_hba.sas_address));
6521 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6522 * sizeof(Mpi2SasIOUnit0PhyData_t));
6523 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6524 if (!sas_iounit_pg0) {
6525 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6526 __FILE__, __LINE__, __func__);
6530 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6531 sas_iounit_pg0, sz)) != 0)
6533 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6534 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6536 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6537 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
6539 ioc->sas_hba.handle = le16_to_cpu(
6540 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6541 port_id = sas_iounit_pg0->PhyData[i].Port;
/* First sighting of this port id: create and enlist a hba_port. */
6542 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6543 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6547 port->port_id = port_id;
6549 "hba_port entry: %p, port: %d is added to hba_port list\n",
6550 port, port->port_id);
/* Ports discovered during recovery are protected from the dirty-port
 * cleanup by the NEW_PORT flag. */
6551 if (ioc->shost_recovery)
6552 port->flags = HBA_PORT_FLAG_NEW_PORT;
6553 list_add_tail(&port->list, &ioc->port_table_list);
6556 * Check whether current Phy belongs to HBA vSES device or not.
6558 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6559 MPI2_SAS_DEVICE_INFO_SEP &&
6560 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
6561 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6564 "failure at %s:%d/%s()!\n",
6565 __FILE__, __LINE__, __func__);
6568 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6569 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6572 * Allocate a virtual_phy object for vSES device, if
6573 * this vSES device is hot added.
6575 if (!_scsih_alloc_vphy(ioc, port_id, i))
6577 ioc->sas_hba.phy[i].hba_vphy = 1;
6580 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6581 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
/* A device is attached but the rate field is below 1.5G: report the
 * minimum valid rate to the transport layer. */
6583 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6584 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6585 ioc->sas_hba.phy[i].port =
6586 mpt3sas_get_port_by_id(ioc, port_id, 0);
6587 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6588 attached_handle, i, link_rate,
6589 ioc->sas_hba.phy[i].port);
6592 kfree(sas_iounit_pg0);
6596 * _scsih_sas_host_add - create sas host object
6597 * @ioc: per adapter object
6599 * Creating host side data object, stored in ioc->sas_hba
/*
 * One-time construction of ioc->sas_hba at driver init: allocate the phy
 * array, read SAS IO Unit Pages 0/1 (missing-delay settings), register
 * each host phy with the transport layer, then fill in the host's SAS
 * address and enclosure info from SAS Device / Enclosure Page 0.
 * NOTE(review): excerpt view — declarations of 'i', 'sz', 'ioc_status',
 * many braces, goto/out error-path lines and some allocation-failure
 * handling are outside this chunk.
 */
6602 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6605 Mpi2ConfigReply_t mpi_reply;
6606 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6607 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6608 Mpi2SasPhyPage0_t phy_pg0;
6609 Mpi2SasDevicePage0_t sas_device_pg0;
6610 Mpi2SasEnclosurePage0_t enclosure_pg0;
6613 u8 device_missing_delay;
6614 u8 num_phys, port_id;
6615 struct hba_port *port;
6617 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6619 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6620 __FILE__, __LINE__, __func__);
6623 ioc->sas_hba.phy = kcalloc(num_phys,
6624 sizeof(struct _sas_phy), GFP_KERNEL);
6625 if (!ioc->sas_hba.phy) {
6626 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6627 __FILE__, __LINE__, __func__);
6630 ioc->sas_hba.num_phys = num_phys;
6632 /* sas_iounit page 0 */
6633 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
6634 sizeof(Mpi2SasIOUnit0PhyData_t));
6635 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6636 if (!sas_iounit_pg0) {
6637 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6638 __FILE__, __LINE__, __func__);
6641 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6642 sas_iounit_pg0, sz))) {
6643 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6644 __FILE__, __LINE__, __func__);
6647 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6648 MPI2_IOCSTATUS_MASK;
6649 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6650 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6651 __FILE__, __LINE__, __func__);
6655 /* sas_iounit page 1 */
6656 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
6657 sizeof(Mpi2SasIOUnit1PhyData_t));
6658 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6659 if (!sas_iounit_pg1) {
6660 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6661 __FILE__, __LINE__, __func__);
6664 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6665 sas_iounit_pg1, sz))) {
6666 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6667 __FILE__, __LINE__, __func__);
6670 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6671 MPI2_IOCSTATUS_MASK;
6672 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6673 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6674 __FILE__, __LINE__, __func__);
/* Device-missing delay: when the UNIT_16 bit is set the timeout field
 * is expressed in units of 16 seconds. */
6678 ioc->io_missing_delay =
6679 sas_iounit_pg1->IODeviceMissingDelay;
6680 device_missing_delay =
6681 sas_iounit_pg1->ReportDeviceMissingDelay;
6682 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6683 ioc->device_missing_delay = (device_missing_delay &
6684 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6686 ioc->device_missing_delay = device_missing_delay &
6687 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6689 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
/* Per-phy setup: read SAS Phy Page 0, build hba_port entries, allocate
 * vphys for virtual phys, and register each phy with the transport. */
6690 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6691 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6693 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6694 __FILE__, __LINE__, __func__);
6697 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6698 MPI2_IOCSTATUS_MASK;
6699 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6700 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6701 __FILE__, __LINE__, __func__);
6706 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6707 PhyData[0].ControllerDevHandle);
6709 port_id = sas_iounit_pg0->PhyData[i].Port;
6710 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6711 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6715 port->port_id = port_id;
6717 "hba_port entry: %p, port: %d is added to hba_port list\n",
6718 port, port->port_id);
6719 list_add_tail(&port->list,
6720 &ioc->port_table_list);
6724 * Check whether current Phy belongs to HBA vSES device or not.
6726 if ((le32_to_cpu(phy_pg0.PhyInfo) &
6727 MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6728 (phy_pg0.NegotiatedLinkRate >> 4) >=
6729 MPI2_SAS_NEG_LINK_RATE_1_5) {
6731 * Allocate a virtual_phy object for vSES device.
6733 if (!_scsih_alloc_vphy(ioc, port_id, i))
6735 ioc->sas_hba.phy[i].hba_vphy = 1;
6738 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6739 ioc->sas_hba.phy[i].phy_id = i;
6740 ioc->sas_hba.phy[i].port =
6741 mpt3sas_get_port_by_id(ioc, port_id, 0);
6742 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6743 phy_pg0, ioc->sas_hba.parent_dev);
6745 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6746 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6747 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6748 __FILE__, __LINE__, __func__);
6751 ioc->sas_hba.enclosure_handle =
6752 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6753 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6754 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6755 ioc->sas_hba.handle,
6756 (u64)ioc->sas_hba.sas_address,
6757 ioc->sas_hba.num_phys);
6759 if (ioc->sas_hba.enclosure_handle) {
6760 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6761 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6762 ioc->sas_hba.enclosure_handle)))
6763 ioc->sas_hba.enclosure_logical_id =
6764 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6768 kfree(sas_iounit_pg1);
6769 kfree(sas_iounit_pg0);
6773 * _scsih_expander_add - creating expander object
6774 * @ioc: per adapter object
6775 * @handle: expander handle
6777 * Creating expander object, stored in ioc->sas_expander_list.
6779 * Return: 0 for success, else error.
/*
 * Create a _sas_node for the expander at @handle from Expander Pages 0/1,
 * recursing to add a missing parent first (out-of-order topology events),
 * and register the expander and its phys with the SAS transport layer.
 * On the visible failure path the transport port is removed and the node
 * freed.
 * NOTE(review): excerpt view — declarations of 'rc', 'i', 'ioc_status',
 * 'port_id', 'parent_handle', several braces/returns and goto labels are
 * outside this chunk.
 */
6782 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6784 struct _sas_node *sas_expander;
6785 struct _enclosure_node *enclosure_dev;
6786 Mpi2ConfigReply_t mpi_reply;
6787 Mpi2ExpanderPage0_t expander_pg0;
6788 Mpi2ExpanderPage1_t expander_pg1;
6791 u64 sas_address, sas_address_parent = 0;
6793 unsigned long flags;
6794 struct _sas_port *mpt3sas_port = NULL;
/* No topology changes while the host or PCI layer is recovering. */
6802 if (ioc->shost_recovery || ioc->pci_error_recovery)
6805 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6806 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6807 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6808 __FILE__, __LINE__, __func__);
6812 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6813 MPI2_IOCSTATUS_MASK;
6814 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6815 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6816 __FILE__, __LINE__, __func__);
6820 /* handle out of order topology events */
6821 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6822 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6824 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6825 __FILE__, __LINE__, __func__);
6829 port_id = expander_pg0.PhysicalPort;
/* Parent is another expander: make sure it exists first, recursing to
 * add it if the event for it has not been processed yet. */
6830 if (sas_address_parent != ioc->sas_hba.sas_address) {
6831 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6832 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6834 mpt3sas_get_port_by_id(ioc, port_id, 0));
6835 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6836 if (!sas_expander) {
6837 rc = _scsih_expander_add(ioc, parent_handle);
6843 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6844 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6845 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6846 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6847 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6852 sas_expander = kzalloc(sizeof(struct _sas_node),
6854 if (!sas_expander) {
6855 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6856 __FILE__, __LINE__, __func__);
6860 sas_expander->handle = handle;
6861 sas_expander->num_phys = expander_pg0.NumPhys;
6862 sas_expander->sas_address_parent = sas_address_parent;
6863 sas_expander->sas_address = sas_address;
6864 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6865 if (!sas_expander->port) {
6866 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6867 __FILE__, __LINE__, __func__);
6872 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6873 handle, parent_handle,
6874 (u64)sas_expander->sas_address, sas_expander->num_phys);
6876 if (!sas_expander->num_phys)
6878 sas_expander->phy = kcalloc(sas_expander->num_phys,
6879 sizeof(struct _sas_phy), GFP_KERNEL);
6880 if (!sas_expander->phy) {
6881 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6882 __FILE__, __LINE__, __func__);
6887 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6888 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6889 sas_address_parent, sas_expander->port);
6890 if (!mpt3sas_port) {
6891 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6892 __FILE__, __LINE__, __func__);
6896 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6897 sas_expander->rphy = mpt3sas_port->rphy;
/* Register each expander phy with the transport layer using Expander
 * Page 1 data. */
6899 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6900 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6901 &expander_pg1, i, handle))) {
6902 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6903 __FILE__, __LINE__, __func__);
6907 sas_expander->phy[i].handle = handle;
6908 sas_expander->phy[i].phy_id = i;
6909 sas_expander->phy[i].port =
6910 mpt3sas_get_port_by_id(ioc, port_id, 0);
6912 if ((mpt3sas_transport_add_expander_phy(ioc,
6913 &sas_expander->phy[i], expander_pg1,
6914 sas_expander->parent_dev))) {
6915 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6916 __FILE__, __LINE__, __func__);
6922 if (sas_expander->enclosure_handle) {
6924 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6925 sas_expander->enclosure_handle);
6927 sas_expander->enclosure_logical_id =
6928 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6931 _scsih_expander_node_add(ioc, sas_expander);
/* Error path: undo the transport registration and free the node. */
6937 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6938 sas_address_parent, sas_expander->port);
6939 kfree(sas_expander);
6944 * mpt3sas_expander_remove - removing expander object
6945 * @ioc: per adapter object
6946 * @sas_address: expander sas_address
/*
 * Tear down the expander node matching @sas_address/@port: skipped
 * entirely while host recovery is in progress; the lookup is done under
 * sas_node_lock, the node removal outside it.
 * NOTE(review): excerpt view — the early return, the lookup's trailing
 * arguments and the NULL check before node removal are outside this
 * chunk.
 */
6949 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6950 struct hba_port *port)
6952 struct _sas_node *sas_expander;
6953 unsigned long flags;
6955 if (ioc->shost_recovery)
6961 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6962 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6964 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6966 _scsih_expander_node_remove(ioc, sas_expander);
/*
 * NOTE(review): sampled extract -- the early-return bodies after the two
 * status checks and the guard around the reply copy are not visible;
 * comments only, code untouched.
 */
6970 * _scsih_done - internal SCSI_IO callback handler.
6971 * @ioc: per adapter object
6972 * @smid: system request message index
6973 * @msix_index: MSIX table index supplied by the OS
6974 * @reply: reply message frame(lower 32bit addr)
6976 * Callback handler when sending internal generated SCSI_IO.
6977 * The callback index passed is `ioc->scsih_cb_idx`
6979 * Return: 1 meaning mf should be freed from _base_interrupt
6980 * 0 means the mf is freed from this function.
6983 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6985 MPI2DefaultReply_t *mpi_reply;
6987 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* drop completions that do not belong to an outstanding internal command */
6988 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
6990 if (ioc->scsih_cmds.smid != smid)
6992 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
/* copy the reply frame (MsgLength is in 32-bit dwords, hence *4 bytes);
 * the enclosing guard (likely `if (mpi_reply)`) is not visible in this extract
 */
6994 memcpy(ioc->scsih_cmds.reply, mpi_reply,
6995 mpi_reply->MsgLength*4);
6996 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
6998 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
/* wake the waiter blocked on scsih_cmds.done */
6999 complete(&ioc->scsih_cmds.done);
7006 #define MPT3_MAX_LUNS (255)
/*
 * NOTE(review): sampled extract -- the `break`/return statements of the
 * switch and the final return paths are not visible; comments only.
 * The function maps a SAS Device Page 0 AccessStatus code to a printable
 * description and logs a discovery error for non-benign codes.
 */
7010 * _scsih_check_access_status - check access flags
7011 * @ioc: per adapter object
7012 * @sas_address: sas address
7013 * @handle: sas device handle
7014 * @access_status: errors returned during discovery of the device
7016 * Return: 0 for success, else failure
7019 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7020 u16 handle, u8 access_status)
7025 switch (access_status) {
/* benign statuses: no error description is assigned for these */
7026 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7027 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7030 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7031 desc = "sata capability failed";
7033 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7034 desc = "sata affiliation conflict";
7036 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7037 desc = "route not addressable";
7039 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7040 desc = "smp error not addressable";
7042 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7043 desc = "device blocked";
/* all SATA-initialization-failure sub-codes share one description */
7045 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7046 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7047 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7048 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7049 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7050 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7051 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7052 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7053 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7054 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7055 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7056 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7057 desc = "sata initialization failed";
/* NOTE(review): a `if (!desc) return 0;` style guard is presumably between
 * the switch and this log call, but is not visible in this extract
 */
7067 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7068 desc, (u64)sas_address, handle);
/*
 * NOTE(review): sampled extract -- several guard/else/return lines are
 * missing from view; comments only, code untouched.
 * Purpose (from visible code): after a link-rate change, re-read SAS Device
 * Page 0 for @handle, and if the device is already known by SAS address,
 * refresh its handle/enclosure bookkeeping and unblock its I/O.
 */
7073 * _scsih_check_device - checking device responsiveness
7074 * @ioc: per adapter object
7075 * @parent_sas_address: sas address of parent expander or sas host
7076 * @handle: attached device handle
7077 * @phy_number: phy number
7078 * @link_rate: new link rate
7081 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7082 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7084 Mpi2ConfigReply_t mpi_reply;
7085 Mpi2SasDevicePage0_t sas_device_pg0;
7086 struct _sas_device *sas_device = NULL;
7087 struct _enclosure_node *enclosure_dev = NULL;
7089 unsigned long flags;
7091 struct scsi_target *starget;
7092 struct MPT3SAS_TARGET *sas_target_priv_data;
7094 struct hba_port *port;
7096 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7097 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7100 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7101 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7104 /* wide port handling ~ we need only handle device once for the phy that
7105 * is matched in sas device page zero
7107 if (phy_number != sas_device_pg0.PhyNum)
7110 /* check if this is end device */
7111 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7112 if (!(_scsih_is_end_device(device_info)))
7115 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7116 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7117 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7120 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
/* firmware may assign a new handle to the same device after reset /
 * re-discovery; propagate the new handle to the target private data
 */
7126 if (unlikely(sas_device->handle != handle)) {
7127 starget = sas_device->starget;
7128 sas_target_priv_data = starget->hostdata;
7129 starget_printk(KERN_INFO, starget,
7130 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7131 sas_device->handle, handle);
7132 sas_target_priv_data->handle = handle;
7133 sas_device->handle = handle;
7134 if (le16_to_cpu(sas_device_pg0.Flags) &
7135 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7136 sas_device->enclosure_level =
7137 sas_device_pg0.EnclosureLevel;
/* ConnectorName is a fixed 4-byte field; NUL-terminate after copy */
7138 memcpy(sas_device->connector_name,
7139 sas_device_pg0.ConnectorName, 4);
7140 sas_device->connector_name[4] = '\0';
7142 sas_device->enclosure_level = 0;
7143 sas_device->connector_name[0] = '\0';
/* refresh enclosure handle / logical id / chassis slot from page 0 */
7146 sas_device->enclosure_handle =
7147 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7148 sas_device->is_chassis_slot_valid = 0;
7149 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7150 sas_device->enclosure_handle);
7151 if (enclosure_dev) {
7152 sas_device->enclosure_logical_id =
7153 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7154 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7155 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7156 sas_device->is_chassis_slot_valid = 1;
7157 sas_device->chassis_slot =
7158 enclosure_dev->pg0.ChassisSlot;
7163 /* check if device is present */
7164 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7165 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7166 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7171 /* check if there were any issues with discovery */
7172 if (_scsih_check_access_status(ioc, sas_address, handle,
7173 sas_device_pg0.AccessStatus))
/* device looks healthy: drop the lock before unblocking its I/O */
7176 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7177 _scsih_ublock_io_device(ioc, sas_address, port);
7180 sas_device_put(sas_device);
7184 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7186 sas_device_put(sas_device);
/*
 * NOTE(review): sampled extract -- various `return`/`goto`/brace lines are
 * missing from view; comments only, code untouched.
 */
7190 * _scsih_add_device - creating sas device object
7191 * @ioc: per adapter object
7192 * @handle: sas device handle
7193 * @phy_num: phy number end device attached to
7194 * @is_pd: is this hidden raid component
7196 * Creating end device object, stored in ioc->sas_device_list.
7198 * Return: 0 for success, non-zero for failure.
7201 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7204 Mpi2ConfigReply_t mpi_reply;
7205 Mpi2SasDevicePage0_t sas_device_pg0;
7206 struct _sas_device *sas_device;
7207 struct _enclosure_node *enclosure_dev = NULL;
/* read SAS Device Page 0 for this handle; bail on config-request failure */
7213 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7214 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7215 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7216 __FILE__, __LINE__, __func__);
7220 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7221 MPI2_IOCSTATUS_MASK;
7222 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7223 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7224 __FILE__, __LINE__, __func__);
7228 /* check if this is end device */
7229 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7230 if (!(_scsih_is_end_device(device_info)))
/* mark this handle as pending OS device addition */
7232 set_bit(handle, ioc->pend_os_device_add);
7233 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7235 /* check if device is present */
7236 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7237 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
/* NOTE(review): format string reads "0x04%x" -- in mainline this is the
 * "0x%04x" handle format; looks like a latent typo, left untouched here
 */
7238 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7243 /* check if there were any issues with discovery */
7244 if (_scsih_check_access_status(ioc, sas_address, handle,
7245 sas_device_pg0.AccessStatus))
/* if the device already exists by SAS address, nothing to add */
7248 port_id = sas_device_pg0.PhysicalPort;
7249 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7250 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7252 clear_bit(handle, ioc->pend_os_device_add);
7253 sas_device_put(sas_device);
7257 if (sas_device_pg0.EnclosureHandle) {
7259 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7260 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7261 if (enclosure_dev == NULL)
7262 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7263 sas_device_pg0.EnclosureHandle);
/* allocate and populate the new sas_device object */
7266 sas_device = kzalloc(sizeof(struct _sas_device),
7269 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7270 __FILE__, __LINE__, __func__);
7274 kref_init(&sas_device->refcount);
7275 sas_device->handle = handle;
7276 if (_scsih_get_sas_address(ioc,
7277 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7278 &sas_device->sas_address_parent) != 0)
7279 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7280 __FILE__, __LINE__, __func__);
7281 sas_device->enclosure_handle =
7282 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7283 if (sas_device->enclosure_handle != 0)
7285 le16_to_cpu(sas_device_pg0.Slot);
7286 sas_device->device_info = device_info;
7287 sas_device->sas_address = sas_address;
7288 sas_device->phy = sas_device_pg0.PhyNum;
7289 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7290 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7291 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7292 if (!sas_device->port) {
7293 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7294 __FILE__, __LINE__, __func__);
7298 if (le16_to_cpu(sas_device_pg0.Flags)
7299 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7300 sas_device->enclosure_level =
7301 sas_device_pg0.EnclosureLevel;
7302 memcpy(sas_device->connector_name,
7303 sas_device_pg0.ConnectorName, 4);
7304 sas_device->connector_name[4] = '\0';
7306 sas_device->enclosure_level = 0;
7307 sas_device->connector_name[0] = '\0';
7309 /* get enclosure_logical_id & chassis_slot*/
7310 sas_device->is_chassis_slot_valid = 0;
7311 if (enclosure_dev) {
7312 sas_device->enclosure_logical_id =
7313 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7314 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7315 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7316 sas_device->is_chassis_slot_valid = 1;
7317 sas_device->chassis_slot =
7318 enclosure_dev->pg0.ChassisSlot;
7322 /* get device name */
7323 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
/* during initial discovery devices go on the init list; later hot-adds
 * go straight onto the live device list
 */
7325 if (ioc->wait_for_discovery_to_complete)
7326 _scsih_sas_device_init_add(ioc, sas_device);
7328 _scsih_sas_device_add(ioc, sas_device);
7331 sas_device_put(sas_device);
/*
 * NOTE(review): sampled extract -- dewtprintk wrappers and some argument
 * continuation lines are not fully visible; comments only, code untouched.
 */
7336 * _scsih_remove_device - removing sas device object
7337 * @ioc: per adapter object
7338 * @sas_device: the sas_device object
7341 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7342 struct _sas_device *sas_device)
7344 struct MPT3SAS_TARGET *sas_target_priv_data;
/* IBM-branded controllers: turn the predictive-failure LED back off */
7346 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7347 (sas_device->pfa_led_on)) {
7348 _scsih_turn_off_pfa_led(ioc, sas_device);
7349 sas_device->pfa_led_on = 0;
7353 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7355 sas_device->handle, (u64)sas_device->sas_address));
7357 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
/* mark the scsi target deleted and unblock its queued I/O before teardown */
7360 if (sas_device->starget && sas_device->starget->hostdata) {
7361 sas_target_priv_data = sas_device->starget->hostdata;
7362 sas_target_priv_data->deleted = 1;
7363 _scsih_ublock_io_device(ioc, sas_device->sas_address,
7365 sas_target_priv_data->handle =
7366 MPT3SAS_INVALID_DEVICE_HANDLE;
/* hidden raid components are not exposed through the transport layer */
7369 if (!ioc->hide_drives)
7370 mpt3sas_transport_port_remove(ioc,
7371 sas_device->sas_address,
7372 sas_device->sas_address_parent,
7375 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7376 sas_device->handle, (u64)sas_device->sas_address);
7378 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7381 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7383 sas_device->handle, (u64)sas_device->sas_address);
7384 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
/*
 * NOTE(review): sampled extract -- `break;` lines of both switches are
 * missing from view; comments only, code untouched.
 * Pretty-prints a SAS topology change list event: overall expander status
 * followed by one line per phy entry.
 */
7389 * _scsih_sas_topology_change_event_debug - debug for topology event
7390 * @ioc: per adapter object
7391 * @event_data: event data payload
7395 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7396 Mpi2EventDataSasTopologyChangeList_t *event_data)
7402 char *status_str = NULL;
7403 u8 link_rate, prev_link_rate;
/* decode the expander-level status into a printable string */
7405 switch (event_data->ExpStatus) {
7406 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7409 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7410 status_str = "remove";
7412 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7414 status_str = "responding";
7416 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7417 status_str = "remove delay";
7420 status_str = "unknown status";
7423 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7424 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7425 "start_phy(%02d), count(%d)\n",
7426 le16_to_cpu(event_data->ExpanderDevHandle),
7427 le16_to_cpu(event_data->EnclosureHandle),
7428 event_data->StartPhyNum, event_data->NumEntries);
/* one line per phy entry in the event */
7429 for (i = 0; i < event_data->NumEntries; i++) {
7430 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7433 phy_number = event_data->StartPhyNum + i;
7434 reason_code = event_data->PHY[i].PhyStatus &
7435 MPI2_EVENT_SAS_TOPO_RC_MASK;
7436 switch (reason_code) {
7437 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7438 status_str = "target add";
7440 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7441 status_str = "target remove";
7443 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7444 status_str = "delay target remove";
7446 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7447 status_str = "link rate change";
7449 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7450 status_str = "target responding";
7453 status_str = "unknown";
/* LinkRate packs new rate in the high nibble, previous in the low */
7456 link_rate = event_data->PHY[i].LinkRate >> 4;
7457 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7458 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7459 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7460 handle, status_str, link_rate, prev_link_rate);
/*
 * NOTE(review): sampled extract -- early returns, `continue`s and `break`s
 * are largely missing from view; comments only, code untouched.
 * Worker that processes a SAS topology change list event: expander
 * add/remove plus per-phy device add, remove and link-rate updates.
 */
7466 * _scsih_sas_topology_change_event - handle topology changes
7467 * @ioc: per adapter object
7468 * @fw_event: The fw_event_work object
7473 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7474 struct fw_event_work *fw_event)
7477 u16 parent_handle, handle;
7479 u8 phy_number, max_phys;
7480 struct _sas_node *sas_expander;
7482 unsigned long flags;
7483 u8 link_rate, prev_link_rate;
7484 struct hba_port *port;
7485 Mpi2EventDataSasTopologyChangeList_t *event_data =
7486 (Mpi2EventDataSasTopologyChangeList_t *)
7487 fw_event->event_data;
7489 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7490 _scsih_sas_topology_change_event_debug(ioc, event_data);
/* nothing to do while the host is resetting or being removed */
7492 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
/* lazily create the hba object on the first event, else refresh it */
7495 if (!ioc->sas_hba.num_phys)
7496 _scsih_sas_host_add(ioc);
7498 _scsih_sas_host_refresh(ioc);
7500 if (fw_event->ignore) {
7501 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7505 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7506 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7508 /* handle expander add */
7509 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7510 if (_scsih_expander_add(ioc, parent_handle) != 0)
/* resolve the parent: an expander, or the hba itself for direct-attach */
7513 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7514 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7517 sas_address = sas_expander->sas_address;
7518 max_phys = sas_expander->num_phys;
7519 port = sas_expander->port;
7520 } else if (parent_handle < ioc->sas_hba.num_phys) {
7521 sas_address = ioc->sas_hba.sas_address;
7522 max_phys = ioc->sas_hba.num_phys;
7524 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7527 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7529 /* handle siblings events */
7530 for (i = 0; i < event_data->NumEntries; i++) {
7531 if (fw_event->ignore) {
7533 ioc_info(ioc, "ignoring expander event\n"));
7536 if (ioc->remove_host || ioc->pci_error_recovery)
7538 phy_number = event_data->StartPhyNum + i;
7539 if (phy_number >= max_phys)
7541 reason_code = event_data->PHY[i].PhyStatus &
7542 MPI2_EVENT_SAS_TOPO_RC_MASK;
/* skip vacant phys unless the device behind them went away */
7543 if ((event_data->PHY[i].PhyStatus &
7544 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7545 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7547 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7550 link_rate = event_data->PHY[i].LinkRate >> 4;
7551 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7552 switch (reason_code) {
7553 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7555 if (ioc->shost_recovery)
7558 if (link_rate == prev_link_rate)
7561 mpt3sas_transport_update_links(ioc, sas_address,
7562 handle, phy_number, link_rate, port);
/* rates below 1.5Gb mean the phy is down; nothing to re-check */
7564 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7567 _scsih_check_device(ioc, sas_address, handle,
7568 phy_number, link_rate);
7570 if (!test_bit(handle, ioc->pend_os_device_add))
7575 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7577 if (ioc->shost_recovery)
7580 mpt3sas_transport_update_links(ioc, sas_address,
7581 handle, phy_number, link_rate, port);
7583 _scsih_add_device(ioc, handle, phy_number, 0);
7586 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7588 _scsih_device_remove_by_handle(ioc, handle);
7593 /* handle expander removal */
7594 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7596 mpt3sas_expander_remove(ioc, sas_address, port);
/*
 * NOTE(review): sampled extract -- `break;` lines of the switch are missing
 * from view; comments only, code untouched.
 * Pretty-prints a SAS device status change event; SMART-data events also
 * carry ASC/ASCQ sense info appended to the same log line.
 */
7602 * _scsih_sas_device_status_change_event_debug - debug for device event
7604 * @event_data: event data payload
7608 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7609 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7611 char *reason_str = NULL;
7613 switch (event_data->ReasonCode) {
7614 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7615 reason_str = "smart data";
7617 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7618 reason_str = "unsupported device discovered";
7620 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7621 reason_str = "internal device reset";
7623 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7624 reason_str = "internal task abort";
7626 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7627 reason_str = "internal task abort set";
7629 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7630 reason_str = "internal clear task set";
7632 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7633 reason_str = "internal query task";
7635 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7636 reason_str = "sata init failure";
7638 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7639 reason_str = "internal device reset complete";
7641 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7642 reason_str = "internal task abort complete";
7644 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7645 reason_str = "internal async notification";
7647 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7648 reason_str = "expander reduced functionality";
7650 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7651 reason_str = "expander reduced functionality complete";
7654 reason_str = "unknown reason";
7657 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7658 reason_str, le16_to_cpu(event_data->DevHandle),
7659 (u64)le64_to_cpu(event_data->SASAddress),
7660 le16_to_cpu(event_data->TaskTag));
/* pr_cont keeps the ASC/ASCQ on the same log line as the header above */
7661 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7662 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7663 event_data->ASC, event_data->ASCQ);
/*
 * NOTE(review): sampled extract -- early return statements are missing from
 * view; comments only, code untouched.
 * Tracks firmware-internal device resets: sets the target's tm_busy flag on
 * "internal device reset" and clears it on the matching "complete" event.
 */
7668 * _scsih_sas_device_status_change_event - handle device status change
7669 * @ioc: per adapter object
7670 * @event_data: The fw event
7674 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7675 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7677 struct MPT3SAS_TARGET *target_priv_data;
7678 struct _sas_device *sas_device;
7680 unsigned long flags;
7682 /* In MPI Revision K (0xC), the internal device reset complete was
7683 * implemented, so avoid setting tm_busy flag for older firmware.
7685 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
/* only the reset / reset-complete pair is of interest here */
7688 if (event_data->ReasonCode !=
7689 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7690 event_data->ReasonCode !=
7691 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7694 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7695 sas_address = le64_to_cpu(event_data->SASAddress);
7696 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7698 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7700 if (!sas_device || !sas_device->starget)
7703 target_priv_data = sas_device->starget->hostdata;
7704 if (!target_priv_data)
/* set while firmware resets the device, cleared on the complete event */
7707 if (event_data->ReasonCode ==
7708 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7709 target_priv_data->tm_busy = 1;
7711 target_priv_data->tm_busy = 0;
7713 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7715 "%s tm_busy flag for handle(0x%04x)\n",
7716 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7717 target_priv_data->handle);
7721 sas_device_put(sas_device);
7723 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/*
 * NOTE(review): sampled extract -- `break`/return lines of the switch and
 * the final return paths are missing from view; comments only.
 * Maps a PCIe Device Page 0 AccessStatus code to a printable description
 * and logs the NVMe discovery error for non-benign codes.
 */
7728 * _scsih_check_pcie_access_status - check access flags
7729 * @ioc: per adapter object
7731 * @handle: sas device handle
7732 * @access_status: errors returned during discovery of the device
7734 * Return: 0 for success, else failure
7737 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7738 u16 handle, u8 access_status)
7743 switch (access_status) {
/* benign statuses -- no description assigned */
7744 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7745 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7748 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7749 desc = "PCIe device capability failed";
/* blocked devices are logged but still tracked on the internal list */
7751 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7752 desc = "PCIe device blocked";
7754 "Device with Access Status (%s): wwid(0x%016llx), "
7755 "handle(0x%04x)\n ll only be added to the internal list",
7756 desc, (u64)wwid, handle);
7759 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7760 desc = "PCIe device mem space access failed";
7762 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7763 desc = "PCIe device unsupported";
7765 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7766 desc = "PCIe device MSIx Required";
7768 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7769 desc = "PCIe device init fail max";
7771 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7772 desc = "PCIe device status unknown";
7774 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7775 desc = "nvme ready timeout";
7777 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7778 desc = "nvme device configuration unsupported";
7780 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7781 desc = "nvme identify failed";
7783 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7784 desc = "nvme qconfig failed";
7786 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7787 desc = "nvme qcreation failed";
7789 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7790 desc = "nvme eventcfg failed";
7792 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7793 desc = "nvme get feature stat failed";
7795 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7796 desc = "nvme idle timeout";
7798 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7799 desc = "nvme failure status";
/* default: unknown code -- log the raw value instead of a description */
7802 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7803 access_status, (u64)wwid, handle);
7810 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7811 desc, (u64)wwid, handle);
/*
 * NOTE(review): sampled extract -- dewtprintk wrappers and some argument
 * lines are not fully visible; comments only, code untouched.
 */
7816 * _scsih_pcie_device_remove_from_sml - removing pcie device
7817 * from SML and free up associated memory
7818 * @ioc: per adapter object
7819 * @pcie_device: the pcie_device object
7822 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7823 struct _pcie_device *pcie_device)
7825 struct MPT3SAS_TARGET *sas_target_priv_data;
7828 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7830 pcie_device->handle, (u64)pcie_device->wwid));
7831 if (pcie_device->enclosure_handle != 0)
7833 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7835 (u64)pcie_device->enclosure_logical_id,
7836 pcie_device->slot));
7837 if (pcie_device->connector_name[0] != '\0')
7839 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7841 pcie_device->enclosure_level,
7842 pcie_device->connector_name));
/* mark the target deleted and fail back any queued I/O before removal */
7844 if (pcie_device->starget && pcie_device->starget->hostdata) {
7845 sas_target_priv_data = pcie_device->starget->hostdata;
7846 sas_target_priv_data->deleted = 1;
7847 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7848 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7851 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7852 pcie_device->handle, (u64)pcie_device->wwid);
7853 if (pcie_device->enclosure_handle != 0)
7854 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7855 (u64)pcie_device->enclosure_logical_id,
7857 if (pcie_device->connector_name[0] != '\0')
7858 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7859 pcie_device->enclosure_level,
7860 pcie_device->connector_name);
/* blocked devices were never exposed to the SCSI midlayer -- skip */
7862 if (pcie_device->starget && (pcie_device->access_status !=
7863 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7864 scsi_remove_target(&pcie_device->starget->dev);
7866 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7868 pcie_device->handle, (u64)pcie_device->wwid));
7869 if (pcie_device->enclosure_handle != 0)
7871 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7873 (u64)pcie_device->enclosure_logical_id,
7874 pcie_device->slot));
7875 if (pcie_device->connector_name[0] != '\0')
7877 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7879 pcie_device->enclosure_level,
7880 pcie_device->connector_name));
/* serial_number was heap-allocated at discovery time; free it here */
7882 kfree(pcie_device->serial_number);
/*
 * NOTE(review): sampled extract -- early returns and some braces are
 * missing from view; comments only, code untouched.
 * PCIe counterpart of _scsih_check_device(): re-read PCIe Device Page 0
 * for @handle, refresh handle/enclosure info on a known device, then
 * unblock its I/O if it is present and discovery reported no errors.
 */
7887 * _scsih_pcie_check_device - checking device responsiveness
7888 * @ioc: per adapter object
7889 * @handle: attached device handle
7892 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7894 Mpi2ConfigReply_t mpi_reply;
7895 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7897 struct _pcie_device *pcie_device;
7899 unsigned long flags;
7900 struct scsi_target *starget;
7901 struct MPT3SAS_TARGET *sas_target_priv_data;
7904 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7905 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
7908 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7909 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7912 /* check if this is end device */
7913 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7914 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
7917 wwid = le64_to_cpu(pcie_device_pg0.WWID);
7918 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7919 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7922 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* firmware may have re-assigned the handle; propagate the new one */
7926 if (unlikely(pcie_device->handle != handle)) {
7927 starget = pcie_device->starget;
7928 sas_target_priv_data = starget->hostdata;
7929 pcie_device->access_status = pcie_device_pg0.AccessStatus;
7930 starget_printk(KERN_INFO, starget,
7931 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7932 pcie_device->handle, handle);
7933 sas_target_priv_data->handle = handle;
7934 pcie_device->handle = handle;
7936 if (le32_to_cpu(pcie_device_pg0.Flags) &
7937 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7938 pcie_device->enclosure_level =
7939 pcie_device_pg0.EnclosureLevel;
7940 memcpy(&pcie_device->connector_name[0],
7941 &pcie_device_pg0.ConnectorName[0], 4);
7943 pcie_device->enclosure_level = 0;
7944 pcie_device->connector_name[0] = '\0';
7948 /* check if device is present */
7949 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7950 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7951 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
7953 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7954 pcie_device_put(pcie_device);
7958 /* check if there were any issues with discovery */
7959 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7960 pcie_device_pg0.AccessStatus)) {
7961 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7962 pcie_device_put(pcie_device);
/* device looks healthy: release lock/ref, then unblock its I/O */
7966 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7967 pcie_device_put(pcie_device);
7969 _scsih_ublock_io_device(ioc, wwid, NULL);
/*
 * NOTE(review): sampled extract -- various returns, braces and a few
 * statements are missing from view; comments only, code untouched.
 */
7975 * _scsih_pcie_add_device - creating pcie device object
7976 * @ioc: per adapter object
7977 * @handle: pcie device handle
7979 * Creating end device object, stored in ioc->pcie_device_list.
7981 * Return: 1 means queue the event later, 0 means complete the event
7984 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7986 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7987 Mpi26PCIeDevicePage2_t pcie_device_pg2;
7988 Mpi2ConfigReply_t mpi_reply;
7989 struct _pcie_device *pcie_device;
7990 struct _enclosure_node *enclosure_dev;
/* read PCIe Device Page 0 for this handle; bail on config failure */
7994 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7995 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
7996 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7997 __FILE__, __LINE__, __func__);
8000 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8001 MPI2_IOCSTATUS_MASK;
8002 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8003 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8004 __FILE__, __LINE__, __func__);
/* mark this handle as pending OS device addition */
8008 set_bit(handle, ioc->pend_os_device_add);
8009 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8011 /* check if device is present */
8012 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8013 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
/* NOTE(review): "0x04%x" looks like a typo for "0x%04x"; left untouched */
8014 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8019 /* check if there were any issues with discovery */
8020 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8021 pcie_device_pg0.AccessStatus))
8024 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8025 (pcie_device_pg0.DeviceInfo))))
/* already known by WWID? then nothing to add */
8028 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8030 clear_bit(handle, ioc->pend_os_device_add);
8031 pcie_device_put(pcie_device);
8035 /* PCIe Device Page 2 contains read-only information about a
8036 * specific NVMe device; therefore, this page is only
8037 * valid for NVMe devices and skip for pcie devices of type scsi.
8039 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8040 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8041 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8042 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8045 "failure at %s:%d/%s()!\n", __FILE__,
8046 __LINE__, __func__);
8050 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8051 MPI2_IOCSTATUS_MASK;
8052 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8054 "failure at %s:%d/%s()!\n", __FILE__,
8055 __LINE__, __func__);
/* allocate and populate the new pcie_device object */
8060 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8062 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8063 __FILE__, __LINE__, __func__);
8067 kref_init(&pcie_device->refcount);
8068 pcie_device->id = ioc->pcie_target_id++;
8069 pcie_device->channel = PCIE_CHANNEL;
8070 pcie_device->handle = handle;
8071 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8072 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8073 pcie_device->wwid = wwid;
8074 pcie_device->port_num = pcie_device_pg0.PortNum;
8075 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8076 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8078 pcie_device->enclosure_handle =
8079 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8080 if (pcie_device->enclosure_handle != 0)
8081 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8083 if (le32_to_cpu(pcie_device_pg0.Flags) &
8084 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8085 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8086 memcpy(&pcie_device->connector_name[0],
8087 &pcie_device_pg0.ConnectorName[0], 4);
8089 pcie_device->enclosure_level = 0;
8090 pcie_device->connector_name[0] = '\0';
8093 /* get enclosure_logical_id */
8094 if (pcie_device->enclosure_handle) {
8096 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8097 pcie_device->enclosure_handle);
8099 pcie_device->enclosure_logical_id =
8100 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8102 /* TODO -- Add device name once FW supports it */
8103 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8104 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8105 pcie_device->nvme_mdts =
8106 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8107 pcie_device->shutdown_latency =
8108 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8110 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8111 * if drive's RTD3 Entry Latency is greater then IOC's
8112 * max_shutdown_latency.
8114 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8115 ioc->max_shutdown_latency =
8116 pcie_device->shutdown_latency;
/* fall back to a 30 (seconds, per driver convention) reset timeout when
 * page 2 does not supply one, and for pcie-scsi devices with no page 2
 */
8117 if (pcie_device_pg2.ControllerResetTO)
8118 pcie_device->reset_timeout =
8119 pcie_device_pg2.ControllerResetTO;
8121 pcie_device->reset_timeout = 30;
8123 pcie_device->reset_timeout = 30;
8125 if (ioc->wait_for_discovery_to_complete)
8126 _scsih_pcie_device_init_add(ioc, pcie_device);
8128 _scsih_pcie_device_add(ioc, pcie_device);
8130 pcie_device_put(pcie_device);
8135 * _scsih_pcie_topology_change_event_debug - debug for topology
8137 * @ioc: per adapter object
8138 * @event_data: event data payload
8142 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8143 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8149 char *status_str = NULL;
8150 u8 link_rate, prev_link_rate;
8152 switch (event_data->SwitchStatus) {
8153 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8156 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8157 status_str = "remove";
8159 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8161 status_str = "responding";
8163 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8164 status_str = "remove delay";
8167 status_str = "unknown status";
8170 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8171 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8172 "start_port(%02d), count(%d)\n",
8173 le16_to_cpu(event_data->SwitchDevHandle),
8174 le16_to_cpu(event_data->EnclosureHandle),
8175 event_data->StartPortNum, event_data->NumEntries);
8176 for (i = 0; i < event_data->NumEntries; i++) {
8178 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8181 port_number = event_data->StartPortNum + i;
8182 reason_code = event_data->PortEntry[i].PortStatus;
8183 switch (reason_code) {
8184 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8185 status_str = "target add";
8187 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8188 status_str = "target remove";
8190 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8191 status_str = "delay target remove";
8193 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8194 status_str = "link rate change";
8196 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8197 status_str = "target responding";
8200 status_str = "unknown";
8203 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8204 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8205 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8206 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8207 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8208 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8209 handle, status_str, link_rate, prev_link_rate);
8214 * _scsih_pcie_topology_change_event - handle PCIe topology
8216 * @ioc: per adapter object
8217 * @fw_event: The fw_event_work object
 *
 * Worker-context handler for a PCIe topology change list event.  For each
 * port entry it either re-checks an existing device (link rate change),
 * adds a new device, or removes a non-responding one.  Bails out early
 * during host recovery, host removal, or PCI error recovery, and honours
 * fw_event->ignore both before and inside the per-entry loop.
8222 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8223 struct fw_event_work *fw_event)
8228 u8 link_rate, prev_link_rate;
8229 unsigned long flags;
8231 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8232 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8233 struct _pcie_device *pcie_device;
8235 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8236 _scsih_pcie_topology_change_event_debug(ioc, event_data);
8238 if (ioc->shost_recovery || ioc->remove_host ||
8239 ioc->pci_error_recovery)
8242 if (fw_event->ignore) {
8243 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8247 /* handle siblings events */
8248 for (i = 0; i < event_data->NumEntries; i++) {
 /* fw_event->ignore may be set asynchronously while we iterate. */
8249 if (fw_event->ignore) {
8251 ioc_info(ioc, "ignoring switch event\n"));
8254 if (ioc->remove_host || ioc->pci_error_recovery)
8256 reason_code = event_data->PortEntry[i].PortStatus;
8258 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8262 link_rate = event_data->PortEntry[i].CurrentPortInfo
8263 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8264 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8265 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8267 switch (reason_code) {
8268 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8269 if (ioc->shost_recovery)
8271 if (link_rate == prev_link_rate)
8273 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8276 _scsih_pcie_check_device(ioc, handle);
8278 /* This code after this point handles the test case
8279 * where a device has been added, however its returning
8280 * BUSY for sometime. Then before the Device Missing
8281 * Delay expires and the device becomes READY, the
8282 * device is removed and added back.
8284 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8285 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8286 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8289 pcie_device_put(pcie_device);
8293 if (!test_bit(handle, ioc->pend_os_device_add))
 /* Device vanished mid-flight: rewrite this entry's status
  * so it is handled as a fresh device-add below. */
8297 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8299 event_data->PortEntry[i].PortStatus &= 0xF0;
8300 event_data->PortEntry[i].PortStatus |=
8301 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8303 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8304 if (ioc->shost_recovery)
8306 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8309 rc = _scsih_pcie_add_device(ioc, handle);
8311 /* mark entry vacant */
8312 /* TODO This needs to be reviewed and fixed,
8313 * we dont have an entry
8314 * to make an event void like vacant
8316 event_data->PortEntry[i].PortStatus |=
8317 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8320 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8321 _scsih_pcie_device_remove_by_handle(ioc, handle);
8328 * _scsih_pcie_device_status_change_event_debug - debug for device event
8330 * @event_data: event data payload
 *
 * Translates a PCIe device status change reason code into a readable
 * string and logs handle/WWID/task-tag; SMART-data events additionally
 * log ASC/ASCQ. Logging only; no state is changed.
8334 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8335 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8337 char *reason_str = NULL;
8339 switch (event_data->ReasonCode) {
8340 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8341 reason_str = "smart data";
8343 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8344 reason_str = "unsupported device discovered";
8346 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8347 reason_str = "internal device reset";
8349 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8350 reason_str = "internal task abort";
8352 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8353 reason_str = "internal task abort set";
8355 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8356 reason_str = "internal clear task set";
8358 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8359 reason_str = "internal query task";
8361 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8362 reason_str = "device init failure";
8364 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8365 reason_str = "internal device reset complete";
8367 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8368 reason_str = "internal task abort complete";
8370 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8371 reason_str = "internal async notification";
8373 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8374 reason_str = "pcie hot reset failed";
8377 reason_str = "unknown reason";
8381 ioc_info(ioc, "PCIE device status change: (%s)\n"
8382 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8383 reason_str, le16_to_cpu(event_data->DevHandle),
8384 (u64)le64_to_cpu(event_data->WWID),
8385 le16_to_cpu(event_data->TaskTag));
 /* Only SMART-data events carry meaningful ASC/ASCQ sense codes. */
8386 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8387 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8388 event_data->ASC, event_data->ASCQ);
8393 * _scsih_pcie_device_status_change_event - handle device status
8395 * @ioc: per adapter object
8396 * @fw_event: The fw_event_work object
 *
 * Worker-context handler. For internal-device-reset start/complete
 * events, marks the target's tm_busy flag so normal I/O is held off
 * while the firmware-internal reset is in flight; all other reason
 * codes are log-only. Device lookup is done by WWID under
 * pcie_device_lock.
8400 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8401 struct fw_event_work *fw_event)
8403 struct MPT3SAS_TARGET *target_priv_data;
8404 struct _pcie_device *pcie_device;
8406 unsigned long flags;
8407 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8408 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8409 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8410 _scsih_pcie_device_status_change_event_debug(ioc,
 /* Only the reset-started / reset-complete pair changes driver state. */
8413 if (event_data->ReasonCode !=
8414 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8415 event_data->ReasonCode !=
8416 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8419 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8420 wwid = le64_to_cpu(event_data->WWID);
8421 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8423 if (!pcie_device || !pcie_device->starget)
8426 target_priv_data = pcie_device->starget->hostdata;
8427 if (!target_priv_data)
 /* tm_busy set on reset start, cleared on reset complete. */
8430 if (event_data->ReasonCode ==
8431 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8432 target_priv_data->tm_busy = 1;
8434 target_priv_data->tm_busy = 0;
8437 pcie_device_put(pcie_device);
8439 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8443 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8445 * @ioc: per adapter object
8446 * @event_data: event data payload
 *
 * Logs an enclosure add/remove event with its handle, logical id and
 * slot count. Logging only; no state is changed.
8450 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8451 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8453 char *reason_str = NULL;
8455 switch (event_data->ReasonCode) {
8456 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8457 reason_str = "enclosure add";
8459 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8460 reason_str = "enclosure remove";
8463 reason_str = "unknown reason";
8467 ioc_info(ioc, "enclosure status change: (%s)\n"
8468 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8470 le16_to_cpu(event_data->EnclosureHandle),
8471 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8472 le16_to_cpu(event_data->StartSlot));
8476 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8477 * @ioc: per adapter object
8478 * @fw_event: The fw_event_work object
 *
 * Worker-context handler. On enclosure add, allocates an
 * _enclosure_node, reads enclosure page 0 from the controller and links
 * the node onto ioc->enclosure_list; on remove, unlinks and frees the
 * matching node. Skipped entirely while the host is in recovery.
8482 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8483 struct fw_event_work *fw_event)
8485 Mpi2ConfigReply_t mpi_reply;
8486 struct _enclosure_node *enclosure_dev = NULL;
8487 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8488 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8490 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8492 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8493 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8494 (Mpi2EventDataSasEnclDevStatusChange_t *)
8495 fw_event->event_data);
8496 if (ioc->shost_recovery)
 /* Look up any node already tracked for this enclosure handle. */
8499 if (enclosure_handle)
8501 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8503 switch (event_data->ReasonCode) {
8504 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8505 if (!enclosure_dev) {
8507 kzalloc(sizeof(struct _enclosure_node),
8509 if (!enclosure_dev) {
8510 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8511 __FILE__, __LINE__, __func__);
8514 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8515 &enclosure_dev->pg0,
8516 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
 /* Config-page read failed: drop the freshly allocated node. */
8519 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8520 MPI2_IOCSTATUS_MASK)) {
8521 kfree(enclosure_dev);
8525 list_add_tail(&enclosure_dev->list,
8526 &ioc->enclosure_list);
8529 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8530 if (enclosure_dev) {
8531 list_del(&enclosure_dev->list);
8532 kfree(enclosure_dev);
8541 * _scsih_sas_broadcast_primitive_event - handle broadcast events
8542 * @ioc: per adapter object
8543 * @fw_event: The fw_event_work object
 *
 * Worker-context handler for a SAS broadcast primitive (typically
 * BROADCAST(SES) after an expander AEN). Blocks I/O on all devices,
 * then for every outstanding SCSI command issues a QUERY_TASK TM; if
 * the firmware still owns the I/O, retries ABORT_TASK (up to 60 times
 * per command). The whole scan is retried up to 5 times (see
 * broadcast_aen_retry) and again if another broadcast AEN arrived
 * while processing. Serialized under ioc->tm_cmds.mutex; the command
 * lookup loop runs under scsi_lookup_lock, which is dropped around
 * each blocking TM and re-taken afterwards.
8547 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8548 struct fw_event_work *fw_event)
8550 struct scsi_cmnd *scmd;
8551 struct scsi_device *sdev;
8552 struct scsiio_tracker *st;
8555 struct MPT3SAS_DEVICE *sas_device_priv_data;
8556 u32 termination_count;
8558 Mpi2SCSITaskManagementReply_t *mpi_reply;
8559 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8560 (Mpi2EventDataSasBroadcastPrimitive_t *)
8561 fw_event->event_data;
8563 unsigned long flags;
8566 u8 task_abort_retries;
8568 mutex_lock(&ioc->tm_cmds.mutex);
8569 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8570 __func__, event_data->PhyNum, event_data->PortWidth);
8572 _scsih_block_io_all_device(ioc);
8574 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8575 mpi_reply = ioc->tm_cmds.reply;
8576 broadcast_aen_retry:
8578 /* sanity checks for retrying this loop */
8579 if (max_retries++ == 5) {
8580 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8582 } else if (max_retries > 1)
8584 ioc_info(ioc, "%s: %d retry\n",
8585 __func__, max_retries - 1));
8587 termination_count = 0;
 /* Walk every possible SCSI I/O smid looking for outstanding commands. */
8589 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8590 if (ioc->shost_recovery)
8592 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8595 st = scsi_cmd_priv(scmd);
8596 sdev = scmd->device;
8597 sas_device_priv_data = sdev->hostdata;
8598 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8600 /* skip hidden raid components */
8601 if (sas_device_priv_data->sas_target->flags &
8602 MPT_TARGET_FLAGS_RAID_COMPONENT)
8605 if (sas_device_priv_data->sas_target->flags &
8606 MPT_TARGET_FLAGS_VOLUME)
8608 /* skip PCIe devices */
8609 if (sas_device_priv_data->sas_target->flags &
8610 MPT_TARGET_FLAGS_PCIE_DEVICE)
8613 handle = sas_device_priv_data->sas_target->handle;
8614 lun = sas_device_priv_data->lun;
8617 if (ioc->shost_recovery)
 /* Drop the lookup lock across the blocking QUERY_TASK TM. */
8620 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8621 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8622 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8623 st->msix_io, 30, 0);
8625 sdev_printk(KERN_WARNING, sdev,
8626 "mpt3sas_scsih_issue_tm: FAILED when sending "
8627 "QUERY_TASK: scmd(%p)\n", scmd);
8628 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8629 goto broadcast_aen_retry;
8631 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8632 & MPI2_IOCSTATUS_MASK;
8633 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8634 sdev_printk(KERN_WARNING, sdev,
8635 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8637 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8638 goto broadcast_aen_retry;
8641 /* see if IO is still owned by IOC and target */
8642 if (mpi_reply->ResponseCode ==
8643 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8644 mpi_reply->ResponseCode ==
8645 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8646 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8649 task_abort_retries = 0;
 /* I/O still owned by firmware: abort it, with bounded retries. */
8651 if (task_abort_retries++ == 60) {
8653 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8655 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8656 goto broadcast_aen_retry;
8659 if (ioc->shost_recovery)
8662 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8663 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8664 st->smid, st->msix_io, 30, 0);
8665 if (r == FAILED || st->cb_idx != 0xFF) {
8666 sdev_printk(KERN_WARNING, sdev,
8667 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8668 "scmd(%p)\n", scmd);
8672 if (task_abort_retries > 1)
8673 sdev_printk(KERN_WARNING, sdev,
8674 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8676 task_abort_retries - 1, scmd);
8678 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8679 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 /* A new broadcast AEN arrived while we were scanning: rescan. */
8682 if (ioc->broadcast_aen_pending) {
8685 "%s: loop back due to pending AEN\n",
8687 ioc->broadcast_aen_pending = 0;
8688 goto broadcast_aen_retry;
8692 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8696 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8697 __func__, query_count, termination_count));
8699 ioc->broadcast_aen_busy = 0;
8700 if (!ioc->shost_recovery)
8701 _scsih_ublock_io_all_device(ioc);
8702 mutex_unlock(&ioc->tm_cmds.mutex);
8706 * _scsih_sas_discovery_event - handle discovery events
8707 * @ioc: per adapter object
8708 * @fw_event: The fw_event_work object
 *
 * Worker-context handler for a SAS discovery started/completed event.
 * Mostly log-only; additionally, on the first "discovery started" event
 * (sas_hba.num_phys still zero) it triggers host-port setup via
 * _scsih_sas_host_add(), waiting out any in-flight reset first when
 * discovery is disabled by module parameter.
8712 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8713 struct fw_event_work *fw_event)
8715 Mpi2EventDataSasDiscovery_t *event_data =
8716 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8718 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8719 ioc_info(ioc, "discovery event: (%s)",
8720 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8722 if (event_data->DiscoveryStatus)
8723 pr_cont("discovery_status(0x%08x)",
8724 le32_to_cpu(event_data->DiscoveryStatus));
 /* First discovery-start with no phys yet: bring up the SAS host. */
8728 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8729 !ioc->sas_hba.num_phys) {
8730 if (disable_discovery > 0 && ioc->shost_recovery) {
8731 /* Wait for the reset to complete */
8732 while (ioc->shost_recovery)
8735 _scsih_sas_host_add(ioc);
8740 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8742 * @ioc: per adapter object
8743 * @fw_event: The fw_event_work object
 *
 * Log-only handler: reports an SMP command failure or timeout seen by
 * the firmware while discovering a device behind an expander.
8747 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8748 struct fw_event_work *fw_event)
8750 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8751 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8753 switch (event_data->ReasonCode) {
8754 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8755 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8756 le16_to_cpu(event_data->DevHandle),
8757 (u64)le64_to_cpu(event_data->SASAddress),
8758 event_data->PhysicalPort);
8760 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8761 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8762 le16_to_cpu(event_data->DevHandle),
8763 (u64)le64_to_cpu(event_data->SASAddress),
8764 event_data->PhysicalPort);
8772 * _scsih_pcie_enumeration_event - handle enumeration events
8773 * @ioc: per adapter object
8774 * @fw_event: The fw_event_work object
 *
 * Log-only handler for PCIe enumeration started/completed events;
 * emits nothing unless MPT_DEBUG_EVENT_WORK_TASK logging is enabled.
8778 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8779 struct fw_event_work *fw_event)
8781 Mpi26EventDataPCIeEnumeration_t *event_data =
8782 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8784 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8787 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8788 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8789 "started" : "completed",
8791 if (event_data->EnumerationStatus)
8792 pr_cont("enumeration_status(0x%08x)",
8793 le32_to_cpu(event_data->EnumerationStatus));
8798 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8799 * @ioc: per adapter object
8800 * @handle: device handle for physical disk
8801 * @phys_disk_num: physical disk number
8803 * Return: 0 for success, else failure.
 *
 * Sends a RAID_ACTION (PHYSDISK_HIDDEN) request to enable fast-path
 * I/O for a hidden RAID physical disk. No-op on MPI2 (SAS2) HBAs.
 * Serialized via ioc->scsih_cmds.mutex; waits up to 10s for the reply
 * and escalates to a hard reset on timeout (issue_reset path).
8806 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8808 Mpi2RaidActionRequest_t *mpi_request;
8809 Mpi2RaidActionReply_t *mpi_reply;
 /* Fast path via RAID_ACTION is not used on MPI2-generation HBAs. */
8816 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8819 mutex_lock(&ioc->scsih_cmds.mutex);
8821 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8822 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8826 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8828 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8830 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8831 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8836 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8837 ioc->scsih_cmds.smid = smid;
8838 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8840 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8841 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8842 mpi_request->PhysDiskNum = phys_disk_num;
8845 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8846 handle, phys_disk_num));
8848 init_completion(&ioc->scsih_cmds.done);
8849 ioc->put_smid_default(ioc, smid);
8850 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 /* Timeout: decide whether a controller reset is required. */
8852 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8853 mpt3sas_check_cmd_timeout(ioc,
8854 ioc->scsih_cmds.status, mpi_request,
8855 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8860 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8862 mpi_reply = ioc->scsih_cmds.reply;
8863 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8864 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8865 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
8868 ioc_status &= MPI2_IOCSTATUS_MASK;
8869 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8871 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8872 ioc_status, log_info));
8876 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
8880 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8881 mutex_unlock(&ioc->scsih_cmds.mutex);
 /* Reached only on the issue_reset path after a command timeout. */
8884 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8889 * _scsih_reprobe_lun - reprobing lun
8890 * @sdev: scsi device struct
8891 * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 * starget_for_each_device() callback: toggles whether upper-level
 * drivers (sd etc.) may bind to the device, then asks the midlayer to
 * reprobe it so the change takes effect. no_uld_attach is passed as a
 * void* cookie — NULL means expose, non-NULL means hide.
8895 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8897 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8898 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8899 sdev->no_uld_attach ? "hiding" : "exposing");
8900 WARN_ON(scsi_device_reprobe(sdev));
8904 * _scsih_sas_volume_add - add new volume
8905 * @ioc: per adapter object
8906 * @element: IR config element data
 *
 * Reads the volume's WWID, allocates and registers a _raid_device for
 * it (skipping if one already exists for that WWID), and either adds
 * the SCSI device immediately or defers boot-device determination when
 * initial discovery has not finished yet.
8910 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8911 Mpi2EventIrConfigElement_t *element)
8913 struct _raid_device *raid_device;
8914 unsigned long flags;
8916 u16 handle = le16_to_cpu(element->VolDevHandle);
8919 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8921 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8922 __FILE__, __LINE__, __func__);
 /* Duplicate check: bail if this WWID is already tracked. */
8926 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8927 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8928 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8933 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8935 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8936 __FILE__, __LINE__, __func__);
8940 raid_device->id = ioc->sas_id++;
8941 raid_device->channel = RAID_CHANNEL;
8942 raid_device->handle = handle;
8943 raid_device->wwid = wwid;
8944 _scsih_raid_device_add(ioc, raid_device);
8945 if (!ioc->wait_for_discovery_to_complete) {
8946 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8947 raid_device->id, 0);
 /* scsi_add_device failure: roll back the tracking entry. */
8949 _scsih_raid_device_remove(ioc, raid_device);
8951 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8952 _scsih_determine_boot_device(ioc, raid_device, 1);
8953 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8958 * _scsih_sas_volume_delete - delete volume
8959 * @ioc: per adapter object
8960 * @handle: volume device handle
 *
 * Looks up the raid volume by handle, marks its target deleted and
 * unlinks it from the driver's list under raid_device_lock, then — with
 * the lock dropped — removes the SCSI target from the midlayer.
8964 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8966 struct _raid_device *raid_device;
8967 unsigned long flags;
8968 struct MPT3SAS_TARGET *sas_target_priv_data;
8969 struct scsi_target *starget = NULL;
8971 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8972 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8974 if (raid_device->starget) {
8975 starget = raid_device->starget;
8976 sas_target_priv_data = starget->hostdata;
 /* Mark deleted first so in-flight I/O paths see the flag. */
8977 sas_target_priv_data->deleted = 1;
8979 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
8980 raid_device->handle, (u64)raid_device->wwid);
8981 list_del(&raid_device->list);
8984 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 /* scsi_remove_target may sleep, so it runs outside the spinlock. */
8986 scsi_remove_target(&starget->dev);
8990 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
8991 * @ioc: per adapter object
8992 * @element: IR config element data
 *
 * A physical disk has left a RAID volume: clear its RAID-component
 * bookkeeping (pd_handles bit, volume handle/wwid, target flag) under
 * sas_device_lock, then reprobe all of its LUNs so upper-level drivers
 * attach and it appears as a regular /dev/sdX disk.
8996 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
8997 Mpi2EventIrConfigElement_t *element)
8999 struct _sas_device *sas_device;
9000 struct scsi_target *starget = NULL;
9001 struct MPT3SAS_TARGET *sas_target_priv_data;
9002 unsigned long flags;
9003 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9005 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9006 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9008 sas_device->volume_handle = 0;
9009 sas_device->volume_wwid = 0;
9010 clear_bit(handle, ioc->pd_handles);
9011 if (sas_device->starget && sas_device->starget->hostdata) {
9012 starget = sas_device->starget;
9013 sas_target_priv_data = starget->hostdata;
9014 sas_target_priv_data->flags &=
9015 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9018 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9022 /* exposing raid component */
 /* NULL cookie => _scsih_reprobe_lun clears no_uld_attach (expose). */
9024 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9026 sas_device_put(sas_device);
9030 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9031 * @ioc: per adapter object
9032 * @element: IR config element data
 *
 * A physical disk has joined a RAID volume: record the owning volume's
 * handle/wwid on the _sas_device, set the pd_handles bit and the
 * RAID-component target flag under sas_device_lock, enable fast path
 * for the disk, then reprobe its LUNs with no_uld_attach set so it
 * disappears from /dev/sdX.
9036 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9037 Mpi2EventIrConfigElement_t *element)
9039 struct _sas_device *sas_device;
9040 struct scsi_target *starget = NULL;
9041 struct MPT3SAS_TARGET *sas_target_priv_data;
9042 unsigned long flags;
9043 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9044 u16 volume_handle = 0;
9045 u64 volume_wwid = 0;
 /* Resolve which volume now owns this disk before taking the lock. */
9047 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9049 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9052 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9053 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9055 set_bit(handle, ioc->pd_handles);
9056 if (sas_device->starget && sas_device->starget->hostdata) {
9057 starget = sas_device->starget;
9058 sas_target_priv_data = starget->hostdata;
9059 sas_target_priv_data->flags |=
9060 MPT_TARGET_FLAGS_RAID_COMPONENT;
9061 sas_device->volume_handle = volume_handle;
9062 sas_device->volume_wwid = volume_wwid;
9065 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9069 /* hiding raid component */
9070 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
 /* Non-NULL cookie => _scsih_reprobe_lun sets no_uld_attach (hide). */
9073 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9075 sas_device_put(sas_device);
9079 * _scsih_sas_pd_delete - delete pd component
9080 * @ioc: per adapter object
9081 * @element: IR config element data
 *
 * Thin wrapper: removes the physical disk identified by the element's
 * PhysDiskDevHandle from the driver and the SCSI midlayer.
9085 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9086 Mpi2EventIrConfigElement_t *element)
9088 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9090 _scsih_device_remove_by_handle(ioc, handle);
9094 * _scsih_sas_pd_add - remove pd component
 *   (NOTE(review): the one-line summary above says "remove" but the
 *    visible body adds a physical disk — summary looks like a copy/paste
 *    slip in the original; code is authoritative.)
9095 * @ioc: per adapter object
9096 * @element: IR config element data
 *
 * Marks the handle as a RAID physical disk. If the device is already
 * known, just enables fast path; otherwise reads SAS device page 0,
 * updates transport links through the parent, and adds the device
 * (hidden, as a raid component) with fast path enabled.
9100 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9101 Mpi2EventIrConfigElement_t *element)
9103 struct _sas_device *sas_device;
9104 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9105 Mpi2ConfigReply_t mpi_reply;
9106 Mpi2SasDevicePage0_t sas_device_pg0;
9111 set_bit(handle, ioc->pd_handles);
9113 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
 /* Already tracked: only fast path needs enabling. */
9115 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9116 sas_device_put(sas_device);
9120 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9121 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9122 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9123 __FILE__, __LINE__, __func__);
9127 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9128 MPI2_IOCSTATUS_MASK;
9129 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9130 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9131 __FILE__, __LINE__, __func__);
9135 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9136 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9137 mpt3sas_transport_update_links(ioc, sas_address, handle,
9138 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9139 mpt3sas_get_port_by_id(ioc,
9140 sas_device_pg0.PhysicalPort, 0));
9142 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9143 _scsih_add_device(ioc, handle, 0, 1);
9147 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9148 * @ioc: per adapter object
9149 * @event_data: event data payload
 *
 * Logs each element of an IR configuration change list: the per-element
 * reason code and element type plus volume/pd handles. Logging only.
9153 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9154 Mpi2EventDataIrConfigChangeList_t *event_data)
9156 Mpi2EventIrConfigElement_t *element;
9159 char *reason_str = NULL, *element_str = NULL;
9161 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9163 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9164 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9165 "foreign" : "native",
9166 event_data->NumElements);
9167 for (i = 0; i < event_data->NumElements; i++, element++) {
9168 switch (element->ReasonCode) {
9169 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9172 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9173 reason_str = "remove";
9175 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9176 reason_str = "no change";
9178 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9179 reason_str = "hide";
9181 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9182 reason_str = "unhide";
9184 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9185 reason_str = "volume_created";
9187 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9188 reason_str = "volume_deleted";
9190 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9191 reason_str = "pd_created";
9193 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9194 reason_str = "pd_deleted";
9197 reason_str = "unknown reason";
 /* Element type lives in the low bits of ElementFlags. */
9200 element_type = le16_to_cpu(element->ElementFlags) &
9201 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9202 switch (element_type) {
9203 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9204 element_str = "volume";
9206 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9207 element_str = "phys disk";
9209 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9210 element_str = "hot spare";
9213 element_str = "unknown element";
9216 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9217 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9218 reason_str, le16_to_cpu(element->VolDevHandle),
9219 le16_to_cpu(element->PhysDiskDevHandle),
9220 element->PhysDiskNum);
9225 * _scsih_sas_ir_config_change_event - handle ir configuration change events
9226 * @ioc: per adapter object
9227 * @fw_event: The fw_event_work object
 *
 * Worker-context handler for an IR config change list. Dispatches each
 * element: volume created/added and deleted/removed (native configs
 * only), and physical disk hide/unhide/create/delete (skipped on
 * warpdrive controllers, which manage pds differently). During host
 * recovery on non-MPI2 HBAs, only the fast-path enable for hidden pds
 * is performed.
9231 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9232 struct fw_event_work *fw_event)
9234 Mpi2EventIrConfigElement_t *element;
9237 Mpi2EventDataIrConfigChangeList_t *event_data =
9238 (Mpi2EventDataIrConfigChangeList_t *)
9239 fw_event->event_data;
9241 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9242 (!ioc->hide_ir_msg))
9243 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9245 foreign_config = (le32_to_cpu(event_data->Flags) &
9246 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9248 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
 /* During recovery (SAS3+): only re-enable fast path for hidden pds. */
9249 if (ioc->shost_recovery &&
9250 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9251 for (i = 0; i < event_data->NumElements; i++, element++) {
9252 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9253 _scsih_ir_fastpath(ioc,
9254 le16_to_cpu(element->PhysDiskDevHandle),
9255 element->PhysDiskNum);
9260 for (i = 0; i < event_data->NumElements; i++, element++) {
9262 switch (element->ReasonCode) {
9263 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9264 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9265 if (!foreign_config)
9266 _scsih_sas_volume_add(ioc, element);
9268 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9269 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9270 if (!foreign_config)
9271 _scsih_sas_volume_delete(ioc,
9272 le16_to_cpu(element->VolDevHandle));
9274 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9275 if (!ioc->is_warpdrive)
9276 _scsih_sas_pd_hide(ioc, element);
9278 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9279 if (!ioc->is_warpdrive)
9280 _scsih_sas_pd_expose(ioc, element);
9282 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9283 if (!ioc->is_warpdrive)
9284 _scsih_sas_pd_add(ioc, element);
9286 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9287 if (!ioc->is_warpdrive)
9288 _scsih_sas_pd_delete(ioc, element);
9295 * _scsih_sas_ir_volume_event - IR volume event
9296 * @ioc: per adapter object
9297 * @fw_event: The fw_event_work object
 *
 * Worker-context handler for an IR volume state change. Missing/failed
 * volumes are deleted; online/degraded/optimal volumes are created and
 * registered with the SCSI midlayer if not already tracked. Ignored
 * during host recovery and for non-state-change reason codes.
9301 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9302 struct fw_event_work *fw_event)
9305 unsigned long flags;
9306 struct _raid_device *raid_device;
9310 Mpi2EventDataIrVolume_t *event_data =
9311 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
9313 if (ioc->shost_recovery)
9316 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9319 handle = le16_to_cpu(event_data->VolDevHandle);
9320 state = le32_to_cpu(event_data->NewValue);
9321 if (!ioc->hide_ir_msg)
9323 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9325 le32_to_cpu(event_data->PreviousValue),
9328 case MPI2_RAID_VOL_STATE_MISSING:
9329 case MPI2_RAID_VOL_STATE_FAILED:
9330 _scsih_sas_volume_delete(ioc, handle);
9333 case MPI2_RAID_VOL_STATE_ONLINE:
9334 case MPI2_RAID_VOL_STATE_DEGRADED:
9335 case MPI2_RAID_VOL_STATE_OPTIMAL:
 /* Already tracked: nothing to do for this state transition. */
9337 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9338 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9339 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9344 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9346 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9347 __FILE__, __LINE__, __func__);
9351 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9353 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9354 __FILE__, __LINE__, __func__);
9358 raid_device->id = ioc->sas_id++;
9359 raid_device->channel = RAID_CHANNEL;
9360 raid_device->handle = handle;
9361 raid_device->wwid = wwid;
9362 _scsih_raid_device_add(ioc, raid_device);
9363 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9364 raid_device->id, 0);
 /* scsi_add_device failure: roll back the tracking entry. */
9366 _scsih_raid_device_remove(ioc, raid_device);
9369 case MPI2_RAID_VOL_STATE_INITIALIZING:
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* Handle an IR physical-disk state-change event: when a PD becomes usable,
 * mark it in pd_handles, refresh its transport links from SAS Device Page 0,
 * and (re)add the end device if it is not already known. */
9376 * _scsih_sas_ir_physical_disk_event - PD event
9377 * @ioc: per adapter object
9378 * @fw_event: The fw_event_work object
9382 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9383 struct fw_event_work *fw_event)
9385 u16 handle, parent_handle;
9387 struct _sas_device *sas_device;
9388 Mpi2ConfigReply_t mpi_reply;
9389 Mpi2SasDevicePage0_t sas_device_pg0;
9391 Mpi2EventDataIrPhysicalDisk_t *event_data =
9392 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
/* Bail out while a host reset is in progress (action elided in extract). */
9395 if (ioc->shost_recovery)
9398 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9401 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9402 state = le32_to_cpu(event_data->NewValue);
9404 if (!ioc->hide_ir_msg)
9406 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9408 le32_to_cpu(event_data->PreviousValue),
/* PD is usable in one of these states. */
9412 case MPI2_RAID_PD_STATE_ONLINE:
9413 case MPI2_RAID_PD_STATE_DEGRADED:
9414 case MPI2_RAID_PD_STATE_REBUILDING:
9415 case MPI2_RAID_PD_STATE_OPTIMAL:
9416 case MPI2_RAID_PD_STATE_HOT_SPARE:
/* Track the handle as a PD (WarpDrive keeps PDs hidden differently). */
9418 if (!ioc->is_warpdrive)
9419 set_bit(handle, ioc->pd_handles);
/* Already known? drop the reference (and return — elided). */
9421 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9423 sas_device_put(sas_device);
9427 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9428 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9430 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9431 __FILE__, __LINE__, __func__);
9435 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9436 MPI2_IOCSTATUS_MASK;
9437 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9438 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9439 __FILE__, __LINE__, __func__);
/* Refresh transport links using the parent's SAS address, then add the
 * end device. */
9443 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9444 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9445 mpt3sas_transport_update_links(ioc, sas_address, handle,
9446 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9447 mpt3sas_get_port_by_id(ioc,
9448 sas_device_pg0.PhysicalPort, 0));
9450 _scsih_add_device(ioc, handle, 0, 1);
/* Non-usable states: no action taken here (bodies elided). */
9454 case MPI2_RAID_PD_STATE_OFFLINE:
9455 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9456 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* Map a RAID operation code to a human-readable string and log the volume
 * handle with the operation's percent-complete. */
9463 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9464 * @ioc: per adapter object
9465 * @event_data: event data payload
9469 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9470 Mpi2EventDataIrOperationStatus_t *event_data)
9472 char *reason_str = NULL;
9474 switch (event_data->RAIDOperation) {
9475 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9476 reason_str = "resync";
9478 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9479 reason_str = "online capacity expansion";
9481 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9482 reason_str = "consistency check";
9484 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9485 reason_str = "background init";
9487 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9488 reason_str = "make data consistent";
/* default case / unknown-operation handling elided in extract. */
9495 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9497 le16_to_cpu(event_data->VolDevHandle),
9498 event_data->PercentComplete);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* Handle an IR operation-status event: optionally log it, and for RESYNC
 * operations record the percent-complete on the matching raid_device so the
 * raid transport layer can report progress. */
9502 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9503 * @ioc: per adapter object
9504 * @fw_event: The fw_event_work object
9508 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9509 struct fw_event_work *fw_event)
9511 Mpi2EventDataIrOperationStatus_t *event_data =
9512 (Mpi2EventDataIrOperationStatus_t *)
9513 fw_event->event_data;
/* NOTE(review): 'static' on a function-local pointer is unusual; it makes
 * the pointer persist across calls and shared between contexts — verify it
 * is intentional and not a leftover. */
9514 static struct _raid_device *raid_device;
9515 unsigned long flags;
/* Debug logging only when event-work-task logging is enabled and IR
 * messages are not hidden (WarpDrive hides them). */
9518 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9519 (!ioc->hide_ir_msg))
9520 _scsih_sas_ir_operation_status_event_debug(ioc,
9523 /* code added for raid transport support */
9524 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9526 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9527 handle = le16_to_cpu(event_data->VolDevHandle);
9528 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
/* NULL check on raid_device elided in extract before this store. */
9530 raid_device->percent_complete =
9531 event_data->PercentComplete;
9532 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
9537 * _scsih_prep_device_scan - initialize parameters prior to device scan
9538 * @ioc: per adapter object
9540 * Set the deleted flag prior to device scan. If the device is found during
9541 * the scan, then we clear the deleted flag.
9544 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9546 struct MPT3SAS_DEVICE *sas_device_priv_data;
9547 struct scsi_device *sdev;
/* Walk every sdev on the host and pre-mark its target as deleted; the
 * post-reset scan clears the flag on devices that respond. */
9549 shost_for_each_device(sdev, ioc->shost) {
9550 sas_device_priv_data = sdev->hostdata;
9551 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9552 sas_device_priv_data->sas_target->deleted = 1;
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After a host reset, match a SAS Device Page 0 (by SAS address, slot and
 * port) against the tracked sas_device list, mark the match as responding,
 * refresh its enclosure data, and re-sync its firmware handle. */
9557 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
9558 * @ioc: per adapter object
9559 * @sas_device_pg0: SAS Device page 0
9561 * After host reset, find out whether devices are still responding.
9562 * Used in _scsih_remove_unresponsive_sas_devices.
9565 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9566 Mpi2SasDevicePage0_t *sas_device_pg0)
9568 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9569 struct scsi_target *starget;
9570 struct _sas_device *sas_device = NULL;
9571 struct _enclosure_node *enclosure_dev = NULL;
9572 unsigned long flags;
9573 struct hba_port *port = mpt3sas_get_port_by_id(
9574 ioc, sas_device_pg0->PhysicalPort, 0);
/* Resolve the enclosure node for this device, if it reports one. */
9576 if (sas_device_pg0->EnclosureHandle) {
9578 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9579 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9580 if (enclosure_dev == NULL)
9581 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9582 sas_device_pg0->EnclosureHandle);
9584 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9585 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
/* Match on SAS address + slot + hba_port (continue statements elided). */
9586 if (sas_device->sas_address != le64_to_cpu(
9587 sas_device_pg0->SASAddress))
9589 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9591 if (sas_device->port != port)
9593 sas_device->responding = 1;
9594 starget = sas_device->starget;
9595 if (starget && starget->hostdata) {
9596 sas_target_priv_data = starget->hostdata;
9597 sas_target_priv_data->tm_busy = 0;
9598 sas_target_priv_data->deleted = 0;
9600 sas_target_priv_data = NULL;
9602 starget_printk(KERN_INFO, starget,
9603 "handle(0x%04x), sas_addr(0x%016llx)\n",
9604 le16_to_cpu(sas_device_pg0->DevHandle),
9605 (unsigned long long)
9606 sas_device->sas_address);
9608 if (sas_device->enclosure_handle != 0)
9609 starget_printk(KERN_INFO, starget,
9610 "enclosure logical id(0x%016llx), slot(%d)\n",
9611 (unsigned long long)
9612 sas_device->enclosure_logical_id,
/* Refresh enclosure level / connector name from page 0 when valid. */
9615 if (le16_to_cpu(sas_device_pg0->Flags) &
9616 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9617 sas_device->enclosure_level =
9618 sas_device_pg0->EnclosureLevel;
9619 memcpy(&sas_device->connector_name[0],
9620 &sas_device_pg0->ConnectorName[0], 4);
9622 sas_device->enclosure_level = 0;
9623 sas_device->connector_name[0] = '\0';
9626 sas_device->enclosure_handle =
9627 le16_to_cpu(sas_device_pg0->EnclosureHandle);
9628 sas_device->is_chassis_slot_valid = 0;
9629 if (enclosure_dev) {
9630 sas_device->enclosure_logical_id = le64_to_cpu(
9631 enclosure_dev->pg0.EnclosureLogicalID);
9632 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9633 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9634 sas_device->is_chassis_slot_valid = 1;
9635 sas_device->chassis_slot =
9636 enclosure_dev->pg0.ChassisSlot;
/* If the firmware handle changed across reset, update our copy. */
9640 if (sas_device->handle == le16_to_cpu(
9641 sas_device_pg0->DevHandle))
9643 pr_info("\thandle changed from(0x%04x)!!!\n",
9644 sas_device->handle);
9645 sas_device->handle = le16_to_cpu(
9646 sas_device_pg0->DevHandle);
9647 if (sas_target_priv_data)
9648 sas_target_priv_data->handle =
9649 le16_to_cpu(sas_device_pg0->DevHandle);
9653 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* Rebuild ioc->enclosure_list from scratch by walking every Enclosure
 * Page 0 with the GET_NEXT_HANDLE form until the config request fails. */
9657 * _scsih_create_enclosure_list_after_reset - Free Existing list,
9658 * And create enclosure list by scanning all Enclosure Page(0)s
9659 * @ioc: per adapter object
9662 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9664 struct _enclosure_node *enclosure_dev;
9665 Mpi2ConfigReply_t mpi_reply;
9666 u16 enclosure_handle;
9669 /* Free existing enclosure list */
9670 mpt3sas_free_enclosure_list(ioc);
9672 /* Re constructing enclosure list after reset*/
/* 0xFFFF seeds the GET_NEXT_HANDLE iteration (per MPI2 config paging). */
9673 enclosure_handle = 0xFFFF;
9676 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9677 if (!enclosure_dev) {
9678 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9679 __FILE__, __LINE__, __func__);
9682 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9683 &enclosure_dev->pg0,
9684 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
/* On failure free the staged node (and stop iterating — elided). */
9687 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9688 MPI2_IOCSTATUS_MASK)) {
9689 kfree(enclosure_dev);
9692 list_add_tail(&enclosure_dev->list,
9693 &ioc->enclosure_list);
9695 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After a host reset, iterate SAS Device Page 0 over all handles and mark
 * every still-present end device as responding. */
9700 * _scsih_search_responding_sas_devices -
9701 * @ioc: per adapter object
9703 * After host reset, find out whether devices are still responding.
9707 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9709 Mpi2SasDevicePage0_t sas_device_pg0;
9710 Mpi2ConfigReply_t mpi_reply;
9715 ioc_info(ioc, "search for end-devices: start\n");
/* Nothing tracked, nothing to mark (early-out elided in extract). */
9717 if (list_empty(&ioc->sas_device_list))
9721 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9722 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9724 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9725 MPI2_IOCSTATUS_MASK;
9726 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9728 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9729 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
/* Only end devices matter here; expanders are handled elsewhere. */
9730 if (!(_scsih_is_end_device(device_info)))
9732 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9736 ioc_info(ioc, "search for end-devices: complete\n");
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After a host reset, match a PCIe Device Page 0 (by WWID and slot) against
 * the tracked pcie_device list, mark the match as responding, refresh
 * enclosure info, and re-sync its firmware handle. */
9740 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9741 * @ioc: per adapter object
9742 * @pcie_device_pg0: PCIe Device page 0
9744 * After host reset, find out whether devices are still responding.
9745 * Used in _scsih_remove_unresponding_devices.
9748 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9749 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9751 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9752 struct scsi_target *starget;
9753 struct _pcie_device *pcie_device;
9754 unsigned long flags;
9756 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9757 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
/* Match key is WWID + slot. */
9758 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9759 && (pcie_device->slot == le16_to_cpu(
9760 pcie_device_pg0->Slot))) {
9761 pcie_device->access_status =
9762 pcie_device_pg0->AccessStatus;
9763 pcie_device->responding = 1;
9764 starget = pcie_device->starget;
9765 if (starget && starget->hostdata) {
9766 sas_target_priv_data = starget->hostdata;
9767 sas_target_priv_data->tm_busy = 0;
9768 sas_target_priv_data->deleted = 0;
9770 sas_target_priv_data = NULL;
9772 starget_printk(KERN_INFO, starget,
9773 "handle(0x%04x), wwid(0x%016llx) ",
9774 pcie_device->handle,
9775 (unsigned long long)pcie_device->wwid);
9776 if (pcie_device->enclosure_handle != 0)
9777 starget_printk(KERN_INFO, starget,
9778 "enclosure logical id(0x%016llx), "
9780 (unsigned long long)
9781 pcie_device->enclosure_logical_id,
/* Enclosure level is only meaningful on non-MPI2 (gen3+) controllers. */
9785 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9786 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9787 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9788 pcie_device->enclosure_level =
9789 pcie_device_pg0->EnclosureLevel;
9790 memcpy(&pcie_device->connector_name[0],
9791 &pcie_device_pg0->ConnectorName[0], 4);
9793 pcie_device->enclosure_level = 0;
9794 pcie_device->connector_name[0] = '\0';
/* If the firmware handle changed across reset, update our copy. */
9797 if (pcie_device->handle == le16_to_cpu(
9798 pcie_device_pg0->DevHandle))
9800 pr_info("\thandle changed from(0x%04x)!!!\n",
9801 pcie_device->handle);
9802 pcie_device->handle = le16_to_cpu(
9803 pcie_device_pg0->DevHandle);
9804 if (sas_target_priv_data)
9805 sas_target_priv_data->handle =
9806 le16_to_cpu(pcie_device_pg0->DevHandle);
9812 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After a host reset, iterate PCIe Device Page 0 over all handles and mark
 * every still-present NVMe/PCIe-SCSI end device as responding. */
9816 * _scsih_search_responding_pcie_devices -
9817 * @ioc: per adapter object
9819 * After host reset, find out whether devices are still responding.
9823 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9825 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9826 Mpi2ConfigReply_t mpi_reply;
9831 ioc_info(ioc, "search for end-devices: start\n");
/* Nothing tracked, nothing to mark (early-out elided in extract). */
9833 if (list_empty(&ioc->pcie_device_list))
9837 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9838 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9840 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9841 MPI2_IOCSTATUS_MASK;
9842 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9843 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9844 __func__, ioc_status,
9845 le32_to_cpu(mpi_reply.IOCLogInfo));
9848 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9849 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9850 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9852 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9855 ioc_info(ioc, "search for PCIe end-devices: complete\n");
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After a host reset, find the raid_device with the given WWID, mark it as
 * responding, re-init WarpDrive direct-IO state, and re-sync its handle.
 * Note the lock is dropped around the printk/warpdrive re-init and
 * re-acquired before touching the handle. */
9859 * _scsih_mark_responding_raid_device - mark a raid_device as responding
9860 * @ioc: per adapter object
9861 * @wwid: world wide identifier for raid volume
9862 * @handle: device handle
9864 * After host reset, find out whether devices are still responding.
9865 * Used in _scsih_remove_unresponsive_raid_devices.
9868 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
9871 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9872 struct scsi_target *starget;
9873 struct _raid_device *raid_device;
9874 unsigned long flags;
9876 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9877 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
9878 if (raid_device->wwid == wwid && raid_device->starget) {
9879 starget = raid_device->starget;
9880 if (starget && starget->hostdata) {
9881 sas_target_priv_data = starget->hostdata;
9882 sas_target_priv_data->deleted = 0;
9884 sas_target_priv_data = NULL;
9885 raid_device->responding = 1;
/* Drop the lock before logging / warpdrive re-init (may sleep). */
9886 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9887 starget_printk(KERN_INFO, raid_device->starget,
9888 "handle(0x%04x), wwid(0x%016llx)\n", handle,
9889 (unsigned long long)raid_device->wwid);
9892 * WARPDRIVE: The handles of the PDs might have changed
9893 * across the host reset so re-initialize the
9894 * required data for Direct IO
9896 mpt3sas_init_warpdrive_properties(ioc, raid_device);
9897 spin_lock_irqsave(&ioc->raid_device_lock, flags);
/* Handle unchanged: nothing more to update (return elided). */
9898 if (raid_device->handle == handle) {
9899 spin_unlock_irqrestore(&ioc->raid_device_lock,
9903 pr_info("\thandle changed from(0x%04x)!!!\n",
9904 raid_device->handle);
9905 raid_device->handle = handle;
9906 if (sas_target_priv_data)
9907 sas_target_priv_data->handle = handle;
9908 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9912 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After a host reset, walk all RAID volumes (Volume Page 1/0) marking
 * usable ones as responding, then rebuild the pd_handles bitmap from
 * Physical Disk Page 0 on non-WarpDrive controllers. */
9916 * _scsih_search_responding_raid_devices -
9917 * @ioc: per adapter object
9919 * After host reset, find out whether devices are still responding.
9923 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9925 Mpi2RaidVolPage1_t volume_pg1;
9926 Mpi2RaidVolPage0_t volume_pg0;
9927 Mpi2RaidPhysDiskPage0_t pd_pg0;
9928 Mpi2ConfigReply_t mpi_reply;
/* No IR firmware: no volumes to scan (early-out elided in extract). */
9933 if (!ioc->ir_firmware)
9936 ioc_info(ioc, "search for raid volumes: start\n");
9938 if (list_empty(&ioc->raid_device_list))
9942 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9943 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9944 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9945 MPI2_IOCSTATUS_MASK;
9946 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9948 handle = le16_to_cpu(volume_pg1.DevHandle);
9950 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9951 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9952 sizeof(Mpi2RaidVolPage0_t)))
/* Only OPTIMAL/ONLINE/DEGRADED volumes are considered responding. */
9955 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9956 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9957 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
9958 _scsih_mark_responding_raid_device(ioc,
9959 le64_to_cpu(volume_pg1.WWID), handle);
9962 /* refresh the pd_handles */
9963 if (!ioc->is_warpdrive) {
/* 0xFF seeds the GET_NEXT_PHYSDISKNUM iteration. */
9964 phys_disk_num = 0xFF;
9965 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
9966 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9967 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9969 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9970 MPI2_IOCSTATUS_MASK;
9971 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9973 phys_disk_num = pd_pg0.PhysDiskNum;
9974 handle = le16_to_cpu(pd_pg0.DevHandle);
9975 set_bit(handle, ioc->pd_handles);
9979 ioc_info(ioc, "search for responding raid volumes: complete\n");
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After a host reset, match an Expander Page 0 (by SAS address and port)
 * against the tracked expander list, mark the match as responding, refresh
 * enclosure info, and propagate a changed handle to every phy. */
9983 * _scsih_mark_responding_expander - mark a expander as responding
9984 * @ioc: per adapter object
9985 * @expander_pg0:SAS Expander Config Page0
9987 * After host reset, find out whether devices are still responding.
9988 * Used in _scsih_remove_unresponsive_expanders.
9991 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
9992 Mpi2ExpanderPage0_t *expander_pg0)
9994 struct _sas_node *sas_expander = NULL;
9995 unsigned long flags;
9997 struct _enclosure_node *enclosure_dev = NULL;
9998 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
9999 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10000 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10001 struct hba_port *port = mpt3sas_get_port_by_id(
10002 ioc, expander_pg0->PhysicalPort, 0);
10004 if (enclosure_handle)
10006 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10009 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10010 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
/* Match on SAS address + hba_port (continue statements elided). */
10011 if (sas_expander->sas_address != sas_address)
10013 if (sas_expander->port != port)
10015 sas_expander->responding = 1;
10017 if (enclosure_dev) {
10018 sas_expander->enclosure_logical_id =
10019 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10020 sas_expander->enclosure_handle =
10021 le16_to_cpu(expander_pg0->EnclosureHandle);
/* If the handle changed across reset, update the node and all phys. */
10024 if (sas_expander->handle == handle)
10026 pr_info("\texpander(0x%016llx): handle changed" \
10027 " from(0x%04x) to (0x%04x)!!!\n",
10028 (unsigned long long)sas_expander->sas_address,
10029 sas_expander->handle, handle);
10030 sas_expander->handle = handle;
10031 for (i = 0 ; i < sas_expander->num_phys ; i++)
10032 sas_expander->phy[i].handle = handle;
10036 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After a host reset, iterate Expander Page 0 over all handles and mark
 * every still-present expander as responding. */
10040 * _scsih_search_responding_expanders -
10041 * @ioc: per adapter object
10043 * After host reset, find out whether devices are still responding.
10047 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10049 Mpi2ExpanderPage0_t expander_pg0;
10050 Mpi2ConfigReply_t mpi_reply;
10056 ioc_info(ioc, "search for expanders: start\n");
/* Nothing tracked, nothing to mark (early-out elided in extract). */
10058 if (list_empty(&ioc->sas_expander_list))
10062 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10063 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10065 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10066 MPI2_IOCSTATUS_MASK;
10067 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10070 handle = le16_to_cpu(expander_pg0.DevHandle);
10071 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10072 port = expander_pg0.PhysicalPort;
10074 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10075 handle, (unsigned long long)sas_address,
10076 (ioc->multipath_on_hba ?
10077 port : MULTIPATH_DISABLED_PORT_ID));
10078 _scsih_mark_responding_expander(ioc, &expander_pg0);
10082 ioc_info(ioc, "search for expanders: complete\n");
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* After the post-reset responding scan, prune everything that did not
 * respond: SAS end devices, PCIe end devices, RAID volumes and expanders.
 * Devices that did respond have their flag cleared for the next reset.
 * Finally, unblock I/O on the surviving devices. */
10086 * _scsih_remove_unresponding_devices - removing unresponding devices
10087 * @ioc: per adapter object
10090 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10092 struct _sas_device *sas_device, *sas_device_next;
10093 struct _sas_node *sas_expander, *sas_expander_next;
10094 struct _raid_device *raid_device, *raid_device_next;
10095 struct _pcie_device *pcie_device, *pcie_device_next;
10096 struct list_head tmp_list;
10097 unsigned long flags;
10100 ioc_info(ioc, "removing unresponding devices: start\n");
10102 /* removing unresponding end devices */
10103 ioc_info(ioc, "removing unresponding devices: end-devices\n");
10105 * Iterate, pulling off devices marked as non-responding. We become the
10106 * owner for the reference the list had on any object we prune.
10108 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10109 list_for_each_entry_safe(sas_device, sas_device_next,
10110 &ioc->sas_device_list, list) {
/* Move non-responders to a private list; reset the flag on the rest. */
10111 if (!sas_device->responding)
10112 list_move_tail(&sas_device->list, &head);
10114 sas_device->responding = 0;
10116 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10119 * Now, uninitialize and remove the unresponding devices we pruned.
10121 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10122 _scsih_remove_device(ioc, sas_device);
10123 list_del_init(&sas_device->list);
10124 sas_device_put(sas_device);
/* Same prune-and-release pattern for PCIe end devices. */
10127 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
10128 INIT_LIST_HEAD(&head);
10129 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10130 list_for_each_entry_safe(pcie_device, pcie_device_next,
10131 &ioc->pcie_device_list, list) {
10132 if (!pcie_device->responding)
10133 list_move_tail(&pcie_device->list, &head);
10135 pcie_device->responding = 0;
10137 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10139 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10140 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10141 list_del_init(&pcie_device->list);
10142 pcie_device_put(pcie_device);
10145 /* removing unresponding volumes */
10146 if (ioc->ir_firmware) {
10147 ioc_info(ioc, "removing unresponding devices: volumes\n");
10148 list_for_each_entry_safe(raid_device, raid_device_next,
10149 &ioc->raid_device_list, list) {
10150 if (!raid_device->responding)
10151 _scsih_sas_volume_delete(ioc,
10152 raid_device->handle);
10154 raid_device->responding = 0;
10158 /* removing unresponding expanders */
10159 ioc_info(ioc, "removing unresponding devices: expanders\n");
10160 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10161 INIT_LIST_HEAD(&tmp_list);
10162 list_for_each_entry_safe(sas_expander, sas_expander_next,
10163 &ioc->sas_expander_list, list) {
10164 if (!sas_expander->responding)
10165 list_move_tail(&sas_expander->list, &tmp_list);
10167 sas_expander->responding = 0;
10169 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10170 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10172 _scsih_expander_node_remove(ioc, sas_expander);
10175 ioc_info(ioc, "removing unresponding devices: complete\n");
10177 /* unblock devices */
10178 _scsih_ublock_io_all_device(ioc);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* Re-read Expander Page 1 for each phy of @sas_expander and push the
 * attached handle and negotiated link rate into the SAS transport layer.
 * Used after reset when a known expander is rediscovered. */
10182 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10183 struct _sas_node *sas_expander, u16 handle)
10185 Mpi2ExpanderPage1_t expander_pg1;
10186 Mpi2ConfigReply_t mpi_reply;
10189 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10190 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10191 &expander_pg1, i, handle))) {
10192 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10193 __FILE__, __LINE__, __func__);
/* NegotiatedLinkRate: logical rate lives in the upper nibble. */
10197 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10198 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10199 expander_pg1.NegotiatedLinkRate >> 4,
10200 sas_expander->port);
/* NOTE(review): leading numerals are extraction artifacts (original line
 * numbers); jumps in them show elided lines — not compilable as-is. */
/* Full topology rescan after a host reset, in dependency order:
 * expanders first, then (with IR firmware) physical disks and volumes,
 * then SAS end devices, then PCIe end devices.  Already-known objects are
 * skipped; new ones are added. */
10205 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10206 * @ioc: per adapter object
10209 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10211 Mpi2ExpanderPage0_t expander_pg0;
10212 Mpi2SasDevicePage0_t sas_device_pg0;
10213 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10214 Mpi2RaidVolPage1_t volume_pg1;
10215 Mpi2RaidVolPage0_t volume_pg0;
10216 Mpi2RaidPhysDiskPage0_t pd_pg0;
10217 Mpi2EventIrConfigElement_t element;
10218 Mpi2ConfigReply_t mpi_reply;
10219 u8 phys_disk_num, port_id;
10221 u16 handle, parent_handle;
10223 struct _sas_device *sas_device;
10224 struct _pcie_device *pcie_device;
10225 struct _sas_node *expander_device;
/* NOTE(review): 'static' on this local pointer is unusual; verify it is
 * intentional — it persists across calls of this function. */
10226 static struct _raid_device *raid_device;
10228 unsigned long flags;
10230 ioc_info(ioc, "scan devices: start\n");
10232 _scsih_sas_host_refresh(ioc);
/* Pass 1: expanders.  Refresh links for known ones, add unknown ones. */
10234 ioc_info(ioc, "\tscan devices: expanders start\n");
10238 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10239 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10240 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10241 MPI2_IOCSTATUS_MASK;
10242 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10243 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10244 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10247 handle = le16_to_cpu(expander_pg0.DevHandle);
10248 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10249 port_id = expander_pg0.PhysicalPort;
10250 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10251 ioc, le64_to_cpu(expander_pg0.SASAddress),
10252 mpt3sas_get_port_by_id(ioc, port_id, 0));
10253 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10254 if (expander_device)
10255 _scsih_refresh_expander_links(ioc, expander_device,
10258 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10260 (u64)le64_to_cpu(expander_pg0.SASAddress));
10261 _scsih_expander_add(ioc, handle);
10262 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10264 (u64)le64_to_cpu(expander_pg0.SASAddress));
10268 ioc_info(ioc, "\tscan devices: expanders complete\n");
/* IR passes (phys disks + volumes) only apply with IR firmware. */
10270 if (!ioc->ir_firmware)
10273 ioc_info(ioc, "\tscan devices: phys disk start\n");
10276 phys_disk_num = 0xFF;
10277 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10278 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10280 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10281 MPI2_IOCSTATUS_MASK;
10282 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10283 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10284 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10287 phys_disk_num = pd_pg0.PhysDiskNum;
10288 handle = le16_to_cpu(pd_pg0.DevHandle);
/* Already known? drop the reference and move on (continue elided). */
10289 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10291 sas_device_put(sas_device);
10294 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10295 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10298 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10299 MPI2_IOCSTATUS_MASK;
10300 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10301 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10302 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10305 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10306 if (!_scsih_get_sas_address(ioc, parent_handle,
10308 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10310 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10311 port_id = sas_device_pg0.PhysicalPort;
10312 mpt3sas_transport_update_links(ioc, sas_address,
10313 handle, sas_device_pg0.PhyNum,
10314 MPI2_SAS_NEG_LINK_RATE_1_5,
10315 mpt3sas_get_port_by_id(ioc, port_id, 0));
10316 set_bit(handle, ioc->pd_handles);
10318 /* This will retry adding the end device.
10319 * _scsih_add_device() will decide on retries and
10320 * return "1" when it should be retried
10322 while (_scsih_add_device(ioc, handle, retry_count++,
10326 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10328 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10332 ioc_info(ioc, "\tscan devices: phys disk complete\n");
/* Pass: RAID volumes.  Known WWIDs are skipped; usable new volumes are
 * added by synthesizing an RC_ADDED IR config element. */
10334 ioc_info(ioc, "\tscan devices: volumes start\n");
10338 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10339 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10340 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10341 MPI2_IOCSTATUS_MASK;
10342 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10343 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10344 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10347 handle = le16_to_cpu(volume_pg1.DevHandle);
10348 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10349 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10350 le64_to_cpu(volume_pg1.WWID));
10351 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10354 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10355 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10356 sizeof(Mpi2RaidVolPage0_t)))
10358 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10359 MPI2_IOCSTATUS_MASK;
10360 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10361 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10362 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10365 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10366 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10367 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10368 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10369 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10370 element.VolDevHandle = volume_pg1.DevHandle;
10371 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10372 volume_pg1.DevHandle);
10373 _scsih_sas_volume_add(ioc, &element);
10374 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10375 volume_pg1.DevHandle);
10379 ioc_info(ioc, "\tscan devices: volumes complete\n");
/* Pass: SAS end devices.  Known addresses are skipped, new ones added
 * with the same update-links + retry-add sequence as phys disks. */
10383 ioc_info(ioc, "\tscan devices: end devices start\n");
10387 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10388 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10390 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10391 MPI2_IOCSTATUS_MASK;
10392 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10393 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10394 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10397 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10398 if (!(_scsih_is_end_device(
10399 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10401 port_id = sas_device_pg0.PhysicalPort;
10402 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10403 le64_to_cpu(sas_device_pg0.SASAddress),
10404 mpt3sas_get_port_by_id(ioc, port_id, 0));
10406 sas_device_put(sas_device);
10409 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10410 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10411 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10413 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10414 mpt3sas_transport_update_links(ioc, sas_address, handle,
10415 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10416 mpt3sas_get_port_by_id(ioc, port_id, 0));
10418 /* This will retry adding the end device.
10419 * _scsih_add_device() will decide on retries and
10420 * return "1" when it should be retried
10422 while (_scsih_add_device(ioc, handle, retry_count++,
10426 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10428 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10431 ioc_info(ioc, "\tscan devices: end devices complete\n");
/* Pass: PCIe/NVMe end devices, keyed by WWID. */
10432 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10436 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10437 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10439 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10440 & MPI2_IOCSTATUS_MASK;
10441 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10442 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10443 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10446 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10447 if (!(_scsih_is_nvme_pciescsi_device(
10448 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10450 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10451 le64_to_cpu(pcie_device_pg0.WWID));
10453 pcie_device_put(pcie_device);
10457 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10458 _scsih_pcie_add_device(ioc, handle);
10460 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10461 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10463 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10464 ioc_info(ioc, "scan devices: complete\n");
/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	/* Nothing to tear down at pre-reset time; just trace the phase. */
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
10479 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10481 * @ioc: per adapter object
10483 * The handler for doing any required cleanup or initialization.
10486 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10489 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
10490 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10491 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10492 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10493 complete(&ioc->scsih_cmds.done);
10495 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10496 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10497 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10498 complete(&ioc->tm_cmds.done);
10501 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10502 memset(ioc->device_remove_in_progress, 0,
10503 ioc->device_remove_in_progress_sz);
10504 _scsih_fw_event_cleanup_queue(ioc);
10505 _scsih_flush_running_cmds(ioc);
10509 * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
10510 * @ioc: per adapter object
10512 * The handler for doing any required cleanup or initialization.
10515 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10517 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10518 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
10519 !ioc->sas_hba.num_phys)) {
10520 if (ioc->multipath_on_hba) {
10521 _scsih_sas_port_refresh(ioc);
10522 _scsih_update_vphys_after_reset(ioc);
10524 _scsih_prep_device_scan(ioc);
10525 _scsih_create_enclosure_list_after_reset(ioc);
10526 _scsih_search_responding_sas_devices(ioc);
10527 _scsih_search_responding_pcie_devices(ioc);
10528 _scsih_search_responding_raid_devices(ioc);
10529 _scsih_search_responding_expanders(ioc);
10530 _scsih_error_recovery_delete_devices(ioc);
10535 * _mpt3sas_fw_work - delayed task for processing firmware events
10536 * @ioc: per adapter object
10537 * @fw_event: The fw_event_work object
10541 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10543 ioc->current_event = fw_event;
10544 _scsih_fw_event_del_from_list(ioc, fw_event);
10546 /* the queue is being flushed so ignore this event */
10547 if (ioc->remove_host || ioc->pci_error_recovery) {
10548 fw_event_work_put(fw_event);
10549 ioc->current_event = NULL;
10553 switch (fw_event->event) {
10554 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10555 mpt3sas_process_trigger_data(ioc,
10556 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10557 fw_event->event_data);
10559 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10560 while (scsi_host_in_recovery(ioc->shost) ||
10561 ioc->shost_recovery) {
10563 * If we're unloading or cancelling the work, bail.
10564 * Otherwise, this can become an infinite loop.
10566 if (ioc->remove_host || ioc->fw_events_cleanup)
10570 _scsih_remove_unresponding_devices(ioc);
10571 _scsih_del_dirty_vphy(ioc);
10572 _scsih_del_dirty_port_entries(ioc);
10573 _scsih_scan_for_devices_after_reset(ioc);
10574 _scsih_set_nvme_max_shutdown_latency(ioc);
10576 case MPT3SAS_PORT_ENABLE_COMPLETE:
10577 ioc->start_scan = 0;
10578 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10579 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10582 ioc_info(ioc, "port enable: complete from worker thread\n"));
10584 case MPT3SAS_TURN_ON_PFA_LED:
10585 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10587 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10588 _scsih_sas_topology_change_event(ioc, fw_event);
10590 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10591 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10592 _scsih_sas_device_status_change_event_debug(ioc,
10593 (Mpi2EventDataSasDeviceStatusChange_t *)
10594 fw_event->event_data);
10596 case MPI2_EVENT_SAS_DISCOVERY:
10597 _scsih_sas_discovery_event(ioc, fw_event);
10599 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10600 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10602 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10603 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10605 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10606 _scsih_sas_enclosure_dev_status_change_event(ioc,
10609 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10610 _scsih_sas_ir_config_change_event(ioc, fw_event);
10612 case MPI2_EVENT_IR_VOLUME:
10613 _scsih_sas_ir_volume_event(ioc, fw_event);
10615 case MPI2_EVENT_IR_PHYSICAL_DISK:
10616 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10618 case MPI2_EVENT_IR_OPERATION_STATUS:
10619 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10621 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10622 _scsih_pcie_device_status_change_event(ioc, fw_event);
10624 case MPI2_EVENT_PCIE_ENUMERATION:
10625 _scsih_pcie_enumeration_event(ioc, fw_event);
10627 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10628 _scsih_pcie_topology_change_event(ioc, fw_event);
10629 ioc->current_event = NULL;
10634 fw_event_work_put(fw_event);
10635 ioc->current_event = NULL;
10639 * _firmware_event_work
10640 * @work: The fw_event_work object
10643 * wrappers for the work thread handling firmware events
10647 _firmware_event_work(struct work_struct *work)
10649 struct fw_event_work *fw_event = container_of(work,
10650 struct fw_event_work, work);
10652 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10656 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10657 * @ioc: per adapter object
10658 * @msix_index: MSIX table index supplied by the OS
10659 * @reply: reply message frame(lower 32bit addr)
10660 * Context: interrupt.
10662 * This function merely adds a new work task into ioc->firmware_event_thread.
10663 * The tasks are worked from _firmware_event_work in user context.
10665 * Return: 1 meaning mf should be freed from _base_interrupt
10666 * 0 means the mf is freed from this function.
10669 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10672 struct fw_event_work *fw_event;
10673 Mpi2EventNotificationReply_t *mpi_reply;
10676 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10678 /* events turned off due to host reset */
10679 if (ioc->pci_error_recovery)
10682 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10684 if (unlikely(!mpi_reply)) {
10685 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10686 __FILE__, __LINE__, __func__);
10690 event = le16_to_cpu(mpi_reply->Event);
10692 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10693 mpt3sas_trigger_event(ioc, event, 0);
10697 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10699 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10700 (Mpi2EventDataSasBroadcastPrimitive_t *)
10701 mpi_reply->EventData;
10703 if (baen_data->Primitive !=
10704 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10707 if (ioc->broadcast_aen_busy) {
10708 ioc->broadcast_aen_pending++;
10711 ioc->broadcast_aen_busy = 1;
10715 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10716 _scsih_check_topo_delete_events(ioc,
10717 (Mpi2EventDataSasTopologyChangeList_t *)
10718 mpi_reply->EventData);
10720 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10721 _scsih_check_pcie_topo_remove_events(ioc,
10722 (Mpi26EventDataPCIeTopologyChangeList_t *)
10723 mpi_reply->EventData);
10725 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10726 _scsih_check_ir_config_unhide_events(ioc,
10727 (Mpi2EventDataIrConfigChangeList_t *)
10728 mpi_reply->EventData);
10730 case MPI2_EVENT_IR_VOLUME:
10731 _scsih_check_volume_delete_events(ioc,
10732 (Mpi2EventDataIrVolume_t *)
10733 mpi_reply->EventData);
10735 case MPI2_EVENT_LOG_ENTRY_ADDED:
10737 Mpi2EventDataLogEntryAdded_t *log_entry;
10740 if (!ioc->is_warpdrive)
10743 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10744 mpi_reply->EventData;
10745 log_code = (u32 *)log_entry->LogData;
10747 if (le16_to_cpu(log_entry->LogEntryQualifier)
10748 != MPT2_WARPDRIVE_LOGENTRY)
10751 switch (le32_to_cpu(*log_code)) {
10752 case MPT2_WARPDRIVE_LC_SSDT:
10753 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10755 case MPT2_WARPDRIVE_LC_SSDLW:
10756 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10758 case MPT2_WARPDRIVE_LC_SSDLF:
10759 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10761 case MPT2_WARPDRIVE_LC_BRMF:
10762 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10768 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10769 _scsih_sas_device_status_change_event(ioc,
10770 (Mpi2EventDataSasDeviceStatusChange_t *)
10771 mpi_reply->EventData);
10773 case MPI2_EVENT_IR_OPERATION_STATUS:
10774 case MPI2_EVENT_SAS_DISCOVERY:
10775 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10776 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10777 case MPI2_EVENT_IR_PHYSICAL_DISK:
10778 case MPI2_EVENT_PCIE_ENUMERATION:
10779 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10782 case MPI2_EVENT_TEMP_THRESHOLD:
10783 _scsih_temp_threshold_events(ioc,
10784 (Mpi2EventDataTemperature_t *)
10785 mpi_reply->EventData);
10787 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10788 ActiveCableEventData =
10789 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10790 switch (ActiveCableEventData->ReasonCode) {
10791 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10792 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10793 ActiveCableEventData->ReceptacleID);
10794 pr_notice("cannot be powered and devices connected\n");
10795 pr_notice("to this active cable will not be seen\n");
10796 pr_notice("This active cable requires %d mW of power\n",
10797 ActiveCableEventData->ActiveCablePowerRequirement);
10800 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10801 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10802 ActiveCableEventData->ReceptacleID);
10804 "is not running at optimal speed(12 Gb/s rate)\n");
10810 default: /* ignore the rest */
10814 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
10815 fw_event = alloc_fw_event_work(sz);
10817 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10818 __FILE__, __LINE__, __func__);
10822 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
10823 fw_event->ioc = ioc;
10824 fw_event->VF_ID = mpi_reply->VF_ID;
10825 fw_event->VP_ID = mpi_reply->VP_ID;
10826 fw_event->event = event;
10827 _scsih_fw_event_add(ioc, fw_event);
10828 fw_event_work_put(fw_event);
10833 * _scsih_expander_node_remove - removing expander device from list.
10834 * @ioc: per adapter object
10835 * @sas_expander: the sas_device object
10837 * Removing object and freeing associated memory from the
10838 * ioc->sas_expander_list.
10841 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
10842 struct _sas_node *sas_expander)
10844 struct _sas_port *mpt3sas_port, *next;
10845 unsigned long flags;
10847 /* remove sibling ports attached to this expander */
10848 list_for_each_entry_safe(mpt3sas_port, next,
10849 &sas_expander->sas_port_list, port_list) {
10850 if (ioc->shost_recovery)
10852 if (mpt3sas_port->remote_identify.device_type ==
10854 mpt3sas_device_remove_by_sas_address(ioc,
10855 mpt3sas_port->remote_identify.sas_address,
10856 mpt3sas_port->hba_port);
10857 else if (mpt3sas_port->remote_identify.device_type ==
10858 SAS_EDGE_EXPANDER_DEVICE ||
10859 mpt3sas_port->remote_identify.device_type ==
10860 SAS_FANOUT_EXPANDER_DEVICE)
10861 mpt3sas_expander_remove(ioc,
10862 mpt3sas_port->remote_identify.sas_address,
10863 mpt3sas_port->hba_port);
10866 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
10867 sas_expander->sas_address_parent, sas_expander->port);
10870 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10871 sas_expander->handle, (unsigned long long)
10872 sas_expander->sas_address,
10873 sas_expander->port->port_id);
10875 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10876 list_del(&sas_expander->list);
10877 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10879 kfree(sas_expander->phy);
10880 kfree(sas_expander);
10884 * _scsih_nvme_shutdown - NVMe shutdown notification
10885 * @ioc: per adapter object
10887 * Sending IoUnitControl request with shutdown operation code to alert IOC that
10888 * the host system is shutting down so that IOC can issue NVMe shutdown to
10889 * NVMe drives attached to it.
10892 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
10894 Mpi26IoUnitControlRequest_t *mpi_request;
10895 Mpi26IoUnitControlReply_t *mpi_reply;
10898 /* are there any NVMe devices ? */
10899 if (list_empty(&ioc->pcie_device_list))
10902 mutex_lock(&ioc->scsih_cmds.mutex);
10904 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10905 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10909 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10911 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10914 "%s: failed obtaining a smid\n", __func__);
10915 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10919 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10920 ioc->scsih_cmds.smid = smid;
10921 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
10922 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
10923 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
10925 init_completion(&ioc->scsih_cmds.done);
10926 ioc->put_smid_default(ioc, smid);
10927 /* Wait for max_shutdown_latency seconds */
10929 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
10930 ioc->max_shutdown_latency);
10931 wait_for_completion_timeout(&ioc->scsih_cmds.done,
10932 ioc->max_shutdown_latency*HZ);
10934 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
10935 ioc_err(ioc, "%s: timeout\n", __func__);
10939 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
10940 mpi_reply = ioc->scsih_cmds.reply;
10941 ioc_info(ioc, "Io Unit Control shutdown (complete):"
10942 "ioc_status(0x%04x), loginfo(0x%08x)\n",
10943 le16_to_cpu(mpi_reply->IOCStatus),
10944 le32_to_cpu(mpi_reply->IOCLogInfo));
10947 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10948 mutex_unlock(&ioc->scsih_cmds.mutex);
10953 * _scsih_ir_shutdown - IR shutdown notification
10954 * @ioc: per adapter object
10956 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
10957 * the host system is shutting down.
10960 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
10962 Mpi2RaidActionRequest_t *mpi_request;
10963 Mpi2RaidActionReply_t *mpi_reply;
10966 /* is IR firmware build loaded ? */
10967 if (!ioc->ir_firmware)
10970 /* are there any volumes ? */
10971 if (list_empty(&ioc->raid_device_list))
10974 mutex_lock(&ioc->scsih_cmds.mutex);
10976 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10977 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10980 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10982 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10984 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
10985 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10989 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10990 ioc->scsih_cmds.smid = smid;
10991 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
10993 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
10994 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
10996 if (!ioc->hide_ir_msg)
10997 ioc_info(ioc, "IR shutdown (sending)\n");
10998 init_completion(&ioc->scsih_cmds.done);
10999 ioc->put_smid_default(ioc, smid);
11000 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11002 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11003 ioc_err(ioc, "%s: timeout\n", __func__);
11007 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11008 mpi_reply = ioc->scsih_cmds.reply;
11009 if (!ioc->hide_ir_msg)
11010 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11011 le16_to_cpu(mpi_reply->IOCStatus),
11012 le32_to_cpu(mpi_reply->IOCLogInfo));
11016 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11017 mutex_unlock(&ioc->scsih_cmds.mutex);
11021 * _scsih_get_shost_and_ioc - get shost and ioc
11022 * and verify whether they are NULL or not
11023 * @pdev: PCI device struct
11024 * @shost: address of scsi host pointer
11025 * @ioc: address of HBA adapter pointer
11027 * Return zero if *shost and *ioc are not NULL otherwise return error number.
11030 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11031 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11033 *shost = pci_get_drvdata(pdev);
11034 if (*shost == NULL) {
11035 dev_err(&pdev->dev, "pdev's driver data is null\n");
11039 *ioc = shost_priv(*shost);
11040 if (*ioc == NULL) {
11041 dev_err(&pdev->dev, "shost's private data is null\n");
11049 * scsih_remove - detach and remove add host
11050 * @pdev: PCI device struct
11052 * Routine called when unloading the driver.
11054 static void scsih_remove(struct pci_dev *pdev)
11056 struct Scsi_Host *shost;
11057 struct MPT3SAS_ADAPTER *ioc;
11058 struct _sas_port *mpt3sas_port, *next_port;
11059 struct _raid_device *raid_device, *next;
11060 struct MPT3SAS_TARGET *sas_target_priv_data;
11061 struct _pcie_device *pcie_device, *pcienext;
11062 struct workqueue_struct *wq;
11063 unsigned long flags;
11064 Mpi2ConfigReply_t mpi_reply;
11065 struct hba_port *port, *port_next;
11067 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11070 ioc->remove_host = 1;
11072 if (!pci_device_is_present(pdev))
11073 _scsih_flush_running_cmds(ioc);
11075 _scsih_fw_event_cleanup_queue(ioc);
11077 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11078 wq = ioc->firmware_event_thread;
11079 ioc->firmware_event_thread = NULL;
11080 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11082 destroy_workqueue(wq);
11084 * Copy back the unmodified ioc page1. so that on next driver load,
11085 * current modified changes on ioc page1 won't take effect.
11087 if (ioc->is_aero_ioc)
11088 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11089 &ioc->ioc_pg1_copy);
11090 /* release all the volumes */
11091 _scsih_ir_shutdown(ioc);
11092 mpt3sas_destroy_debugfs(ioc);
11093 sas_remove_host(shost);
11094 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11096 if (raid_device->starget) {
11097 sas_target_priv_data =
11098 raid_device->starget->hostdata;
11099 sas_target_priv_data->deleted = 1;
11100 scsi_remove_target(&raid_device->starget->dev);
11102 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11103 raid_device->handle, (u64)raid_device->wwid);
11104 _scsih_raid_device_remove(ioc, raid_device);
11106 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11108 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11109 list_del_init(&pcie_device->list);
11110 pcie_device_put(pcie_device);
11113 /* free ports attached to the sas_host */
11114 list_for_each_entry_safe(mpt3sas_port, next_port,
11115 &ioc->sas_hba.sas_port_list, port_list) {
11116 if (mpt3sas_port->remote_identify.device_type ==
11118 mpt3sas_device_remove_by_sas_address(ioc,
11119 mpt3sas_port->remote_identify.sas_address,
11120 mpt3sas_port->hba_port);
11121 else if (mpt3sas_port->remote_identify.device_type ==
11122 SAS_EDGE_EXPANDER_DEVICE ||
11123 mpt3sas_port->remote_identify.device_type ==
11124 SAS_FANOUT_EXPANDER_DEVICE)
11125 mpt3sas_expander_remove(ioc,
11126 mpt3sas_port->remote_identify.sas_address,
11127 mpt3sas_port->hba_port);
11130 list_for_each_entry_safe(port, port_next,
11131 &ioc->port_table_list, list) {
11132 list_del(&port->list);
11136 /* free phys attached to the sas_host */
11137 if (ioc->sas_hba.num_phys) {
11138 kfree(ioc->sas_hba.phy);
11139 ioc->sas_hba.phy = NULL;
11140 ioc->sas_hba.num_phys = 0;
11143 mpt3sas_base_detach(ioc);
11144 spin_lock(&gioc_lock);
11145 list_del(&ioc->list);
11146 spin_unlock(&gioc_lock);
11147 scsi_host_put(shost);
11151 * scsih_shutdown - routine call during system shutdown
11152 * @pdev: PCI device struct
11155 scsih_shutdown(struct pci_dev *pdev)
11157 struct Scsi_Host *shost;
11158 struct MPT3SAS_ADAPTER *ioc;
11159 struct workqueue_struct *wq;
11160 unsigned long flags;
11161 Mpi2ConfigReply_t mpi_reply;
11163 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11166 ioc->remove_host = 1;
11168 if (!pci_device_is_present(pdev))
11169 _scsih_flush_running_cmds(ioc);
11171 _scsih_fw_event_cleanup_queue(ioc);
11173 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11174 wq = ioc->firmware_event_thread;
11175 ioc->firmware_event_thread = NULL;
11176 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11178 destroy_workqueue(wq);
11180 * Copy back the unmodified ioc page1 so that on next driver load,
11181 * current modified changes on ioc page1 won't take effect.
11183 if (ioc->is_aero_ioc)
11184 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11185 &ioc->ioc_pg1_copy);
11187 _scsih_ir_shutdown(ioc);
11188 _scsih_nvme_shutdown(ioc);
11189 mpt3sas_base_detach(ioc);
11194 * _scsih_probe_boot_devices - reports 1st device
11195 * @ioc: per adapter object
11197 * If specified in bios page 2, this routine reports the 1st
11198 * device scsi-ml or sas transport for persistent boot device
11199 * purposes. Please refer to function _scsih_determine_boot_device()
11202 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11206 struct _sas_device *sas_device;
11207 struct _raid_device *raid_device;
11208 struct _pcie_device *pcie_device;
11210 u64 sas_address_parent;
11212 unsigned long flags;
11215 struct hba_port *port;
11217 /* no Bios, return immediately */
11218 if (!ioc->bios_pg3.BiosVersion)
11222 if (ioc->req_boot_device.device) {
11223 device = ioc->req_boot_device.device;
11224 channel = ioc->req_boot_device.channel;
11225 } else if (ioc->req_alt_boot_device.device) {
11226 device = ioc->req_alt_boot_device.device;
11227 channel = ioc->req_alt_boot_device.channel;
11228 } else if (ioc->current_boot_device.device) {
11229 device = ioc->current_boot_device.device;
11230 channel = ioc->current_boot_device.channel;
11236 if (channel == RAID_CHANNEL) {
11237 raid_device = device;
11238 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11239 raid_device->id, 0);
11241 _scsih_raid_device_remove(ioc, raid_device);
11242 } else if (channel == PCIE_CHANNEL) {
11243 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11244 pcie_device = device;
11245 tid = pcie_device->id;
11246 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11247 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11248 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11250 _scsih_pcie_device_remove(ioc, pcie_device);
11252 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11253 sas_device = device;
11254 handle = sas_device->handle;
11255 sas_address_parent = sas_device->sas_address_parent;
11256 sas_address = sas_device->sas_address;
11257 port = sas_device->port;
11258 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11259 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11261 if (ioc->hide_drives)
11267 if (!mpt3sas_transport_port_add(ioc, handle,
11268 sas_address_parent, port)) {
11269 _scsih_sas_device_remove(ioc, sas_device);
11270 } else if (!sas_device->starget) {
11271 if (!ioc->is_driver_loading) {
11272 mpt3sas_transport_port_remove(ioc,
11274 sas_address_parent, port);
11275 _scsih_sas_device_remove(ioc, sas_device);
11282 * _scsih_probe_raid - reporting raid volumes to scsi-ml
11283 * @ioc: per adapter object
11285 * Called during initial loading of the driver.
11288 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11290 struct _raid_device *raid_device, *raid_next;
11293 list_for_each_entry_safe(raid_device, raid_next,
11294 &ioc->raid_device_list, list) {
11295 if (raid_device->starget)
11297 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11298 raid_device->id, 0);
11300 _scsih_raid_device_remove(ioc, raid_device);
11304 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11306 struct _sas_device *sas_device = NULL;
11307 unsigned long flags;
11309 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11310 if (!list_empty(&ioc->sas_device_init_list)) {
11311 sas_device = list_first_entry(&ioc->sas_device_init_list,
11312 struct _sas_device, list);
11313 sas_device_get(sas_device);
11315 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11320 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11321 struct _sas_device *sas_device)
11323 unsigned long flags;
11325 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11328 * Since we dropped the lock during the call to port_add(), we need to
11329 * be careful here that somebody else didn't move or delete this item
11330 * while we were busy with other things.
11332 * If it was on the list, we need a put() for the reference the list
11333 * had. Either way, we need a get() for the destination list.
11335 if (!list_empty(&sas_device->list)) {
11336 list_del_init(&sas_device->list);
11337 sas_device_put(sas_device);
11340 sas_device_get(sas_device);
11341 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11343 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11347 * _scsih_probe_sas - reporting sas devices to sas transport
11348 * @ioc: per adapter object
11350 * Called during initial loading of the driver.
11353 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11355 struct _sas_device *sas_device;
11357 if (ioc->hide_drives)
11360 while ((sas_device = get_next_sas_device(ioc))) {
11361 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11362 sas_device->sas_address_parent, sas_device->port)) {
11363 _scsih_sas_device_remove(ioc, sas_device);
11364 sas_device_put(sas_device);
11366 } else if (!sas_device->starget) {
11368 * When asyn scanning is enabled, its not possible to
11369 * remove devices while scanning is turned on due to an
11370 * oops in scsi_sysfs_add_sdev()->add_device()->
11371 * sysfs_addrm_start()
11373 if (!ioc->is_driver_loading) {
11374 mpt3sas_transport_port_remove(ioc,
11375 sas_device->sas_address,
11376 sas_device->sas_address_parent,
11378 _scsih_sas_device_remove(ioc, sas_device);
11379 sas_device_put(sas_device);
11383 sas_device_make_active(ioc, sas_device);
11384 sas_device_put(sas_device);
11389 * get_next_pcie_device - Get the next pcie device
11390 * @ioc: per adapter object
11392 * Get the next pcie device from pcie_device_init_list list.
11394 * Return: pcie device structure if pcie_device_init_list list is not empty
11395 * otherwise returns NULL
11397 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11399 struct _pcie_device *pcie_device = NULL;
11400 unsigned long flags;
11402 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11403 if (!list_empty(&ioc->pcie_device_init_list)) {
11404 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11405 struct _pcie_device, list);
11406 pcie_device_get(pcie_device);
11408 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11410 return pcie_device;
11414 * pcie_device_make_active - Add pcie device to pcie_device_list list
11415 * @ioc: per adapter object
11416 * @pcie_device: pcie device object
11418 * Add the pcie device which has registered with SCSI Transport Later to
11419 * pcie_device_list list
11421 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11422 struct _pcie_device *pcie_device)
11424 unsigned long flags;
11426 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11428 if (!list_empty(&pcie_device->list)) {
11429 list_del_init(&pcie_device->list);
11430 pcie_device_put(pcie_device);
11432 pcie_device_get(pcie_device);
11433 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11435 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11439 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11440 * @ioc: per adapter object
11442 * Called during initial loading of the driver.
11445 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11447 struct _pcie_device *pcie_device;
11450 /* PCIe Device List */
11451 while ((pcie_device = get_next_pcie_device(ioc))) {
11452 if (pcie_device->starget) {
11453 pcie_device_put(pcie_device);
11456 if (pcie_device->access_status ==
11457 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11458 pcie_device_make_active(ioc, pcie_device);
11459 pcie_device_put(pcie_device);
11462 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11463 pcie_device->id, 0);
11465 _scsih_pcie_device_remove(ioc, pcie_device);
11466 pcie_device_put(pcie_device);
11468 } else if (!pcie_device->starget) {
11470 * When async scanning is enabled, its not possible to
11471 * remove devices while scanning is turned on due to an
11472 * oops in scsi_sysfs_add_sdev()->add_device()->
11473 * sysfs_addrm_start()
11475 if (!ioc->is_driver_loading) {
11476 /* TODO-- Need to find out whether this condition will
11479 _scsih_pcie_device_remove(ioc, pcie_device);
11480 pcie_device_put(pcie_device);
11484 pcie_device_make_active(ioc, pcie_device);
11485 pcie_device_put(pcie_device);
11490 * _scsih_probe_devices - probing for devices
11491 * @ioc: per adapter object
11493 * Called during initial loading of the driver.
11496 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11498 u16 volume_mapping_flags;
11500 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11501 return; /* return when IOC doesn't support initiator mode */
11503 _scsih_probe_boot_devices(ioc);
11505 if (ioc->ir_firmware) {
11506 volume_mapping_flags =
11507 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11508 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11509 if (volume_mapping_flags ==
11510 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11511 _scsih_probe_raid(ioc);
11512 _scsih_probe_sas(ioc);
11514 _scsih_probe_sas(ioc);
11515 _scsih_probe_raid(ioc);
11518 _scsih_probe_sas(ioc);
11519 _scsih_probe_pcie(ioc);
11524 * scsih_scan_start - scsi lld callback for .scan_start
11525 * @shost: SCSI host pointer
11527 * The shost has the ability to discover targets on its own instead
11528 * of scanning the entire bus. In our implementation, we will kick off
11529 * firmware discovery.
11532 scsih_scan_start(struct Scsi_Host *shost)
11534 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/*
 * Diag buffer: the diag_buffer_enable module parameter wins; otherwise
 * fall back to the Manufacturing Page 11 host-trace-buffer setting.
 */
11536 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11537 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11538 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11539 mpt3sas_enable_diag_buffer(ioc, 1);
/* disable_discovery module parameter suppresses firmware discovery. */
11541 if (disable_discovery > 0)
/* Kick off firmware port enable; scsih_scan_finished() polls for it. */
11544 ioc->start_scan = 1;
11545 rc = mpt3sas_port_enable(ioc);
11548 ioc_info(ioc, "port enable: FAILED\n");
11552 * scsih_scan_finished - scsi lld callback for .scan_finished
11553 * @shost: SCSI host pointer
11554 * @time: elapsed time of the scan in jiffies
11556 * This function will be called periodically until it returns 1 with the
11557 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
11558 * we wait for firmware discovery to complete, then return 1.
11561 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11563 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* Discovery disabled via module parameter: report the scan as done. */
11565 if (disable_discovery > 0) {
11566 ioc->is_driver_loading = 0;
11567 ioc->wait_for_discovery_to_complete = 0;
/* Give firmware port enable at most 300 seconds before declaring failure. */
11571 if (time >= (300 * HZ)) {
11572 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11573 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11574 ioc->is_driver_loading = 0;
/* Port enable still in flight: ask the midlayer to poll again. */
11578 if (ioc->start_scan)
/* Port enable completed, but firmware reported an error status. */
11581 if (ioc->start_scan_failed) {
11582 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11583 ioc->start_scan_failed);
11584 ioc->is_driver_loading = 0;
11585 ioc->wait_for_discovery_to_complete = 0;
/* Mark the host for removal; recovery is not attempted here. */
11586 ioc->remove_host = 1;
11590 ioc_info(ioc, "port enable: SUCCESS\n");
11591 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
/* Discovery finished: attach discovered devices, then start the watchdog. */
11593 if (ioc->wait_for_discovery_to_complete) {
11594 ioc->wait_for_discovery_to_complete = 0;
11595 _scsih_probe_devices(ioc);
11597 mpt3sas_base_start_watchdog(ioc);
11598 ioc->is_driver_loading = 0;
11602 /* shost template for SAS 2.0 HBA devices */
11603 static struct scsi_host_template mpt2sas_driver_template = {
11604 .module = THIS_MODULE,
11605 .name = "Fusion MPT SAS Host",
11606 .proc_name = MPT2SAS_DRIVER_NAME,
/* Command submission and sdev/starget lifecycle hooks. */
11607 .queuecommand = scsih_qcmd,
11608 .target_alloc = scsih_target_alloc,
11609 .slave_alloc = scsih_slave_alloc,
11610 .slave_configure = scsih_slave_configure,
11611 .target_destroy = scsih_target_destroy,
11612 .slave_destroy = scsih_slave_destroy,
/* Async scan: scan_start kicks firmware discovery, scan_finished polls. */
11613 .scan_finished = scsih_scan_finished,
11614 .scan_start = scsih_scan_start,
11615 .change_queue_depth = scsih_change_queue_depth,
/* Error-handler escalation chain: abort -> device -> target -> host reset. */
11616 .eh_abort_handler = scsih_abort,
11617 .eh_device_reset_handler = scsih_dev_reset,
11618 .eh_target_reset_handler = scsih_target_reset,
11619 .eh_host_reset_handler = scsih_host_reset,
11620 .bios_param = scsih_bios_param,
11623 .sg_tablesize = MPT2SAS_SG_DEPTH,
11624 .max_sectors = 32767,
11626 .shost_attrs = mpt3sas_host_attrs,
11627 .sdev_attrs = mpt3sas_dev_attrs,
11628 .track_queue_depth = 1,
/* Per-command driver-private area used by the SCSI-IO tracking code. */
11629 .cmd_size = sizeof(struct scsiio_tracker),
11632 /* raid transport support for SAS 2.0 HBA devices */
11633 static struct raid_function_template mpt2sas_raid_functions = {
/* cookie ties this raid template to the matching shost template. */
11634 .cookie = &mpt2sas_driver_template,
11635 .is_raid = scsih_is_raid,
11636 .get_resync = scsih_get_resync,
11637 .get_state = scsih_get_state,
11640 /* shost template for SAS 3.0 HBA devices */
11641 static struct scsi_host_template mpt3sas_driver_template = {
11642 .module = THIS_MODULE,
11643 .name = "Fusion MPT SAS Host",
11644 .proc_name = MPT3SAS_DRIVER_NAME,
/* Command submission and sdev/starget lifecycle hooks. */
11645 .queuecommand = scsih_qcmd,
11646 .target_alloc = scsih_target_alloc,
11647 .slave_alloc = scsih_slave_alloc,
11648 .slave_configure = scsih_slave_configure,
11649 .target_destroy = scsih_target_destroy,
11650 .slave_destroy = scsih_slave_destroy,
/* Async scan: scan_start kicks firmware discovery, scan_finished polls. */
11651 .scan_finished = scsih_scan_finished,
11652 .scan_start = scsih_scan_start,
11653 .change_queue_depth = scsih_change_queue_depth,
/* Error-handler escalation chain: abort -> device -> target -> host reset. */
11654 .eh_abort_handler = scsih_abort,
11655 .eh_device_reset_handler = scsih_dev_reset,
11656 .eh_target_reset_handler = scsih_target_reset,
11657 .eh_host_reset_handler = scsih_host_reset,
11658 .bios_param = scsih_bios_param,
11661 .sg_tablesize = MPT3SAS_SG_DEPTH,
11662 .max_sectors = 32767,
/* SAS 3.0 parts place no segment-size limit (unlike the SAS 2.0 template). */
11663 .max_segment_size = 0xffffffff,
11665 .shost_attrs = mpt3sas_host_attrs,
11666 .sdev_attrs = mpt3sas_dev_attrs,
11667 .track_queue_depth = 1,
/* Per-command driver-private area used by the SCSI-IO tracking code. */
11668 .cmd_size = sizeof(struct scsiio_tracker),
11671 /* raid transport support for SAS 3.0 HBA devices */
11672 static struct raid_function_template mpt3sas_raid_functions = {
/* cookie ties this raid template to the matching shost template. */
11673 .cookie = &mpt3sas_driver_template,
11674 .is_raid = scsih_is_raid,
11675 .get_resync = scsih_get_resync,
11676 .get_state = scsih_get_state,
11680 * _scsih_determine_hba_mpi_version - determine in which MPI version class
11681 * this device belongs to.
11682 * @pdev: PCI device struct
11684 * return MPI2_VERSION for SAS 2.0 HBA devices,
11685 * MPI25_VERSION for SAS 3.0 HBA devices, and
11686 * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
11689 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
/*
 * Pure classification on the PCI device ID; no hardware access.
 * NOTE(review): the default arm (unknown device -> presumably 0) is not
 * visible in this extract — confirm against the full file.
 */
11692 switch (pdev->device) {
/* SAS 2.0 generation (including SSS6200 WarpDrive and MPI endpoints). */
11693 case MPI2_MFGPAGE_DEVID_SSS6200:
11694 case MPI2_MFGPAGE_DEVID_SAS2004:
11695 case MPI2_MFGPAGE_DEVID_SAS2008:
11696 case MPI2_MFGPAGE_DEVID_SAS2108_1:
11697 case MPI2_MFGPAGE_DEVID_SAS2108_2:
11698 case MPI2_MFGPAGE_DEVID_SAS2108_3:
11699 case MPI2_MFGPAGE_DEVID_SAS2116_1:
11700 case MPI2_MFGPAGE_DEVID_SAS2116_2:
11701 case MPI2_MFGPAGE_DEVID_SAS2208_1:
11702 case MPI2_MFGPAGE_DEVID_SAS2208_2:
11703 case MPI2_MFGPAGE_DEVID_SAS2208_3:
11704 case MPI2_MFGPAGE_DEVID_SAS2208_4:
11705 case MPI2_MFGPAGE_DEVID_SAS2208_5:
11706 case MPI2_MFGPAGE_DEVID_SAS2208_6:
11707 case MPI2_MFGPAGE_DEVID_SAS2308_1:
11708 case MPI2_MFGPAGE_DEVID_SAS2308_2:
11709 case MPI2_MFGPAGE_DEVID_SAS2308_3:
11710 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11711 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11712 return MPI2_VERSION;
/* SAS 3.0 first generation (Fury/Invader). */
11713 case MPI25_MFGPAGE_DEVID_SAS3004:
11714 case MPI25_MFGPAGE_DEVID_SAS3008:
11715 case MPI25_MFGPAGE_DEVID_SAS3108_1:
11716 case MPI25_MFGPAGE_DEVID_SAS3108_2:
11717 case MPI25_MFGPAGE_DEVID_SAS3108_5:
11718 case MPI25_MFGPAGE_DEVID_SAS3108_6:
11719 return MPI25_VERSION;
/* SAS 3.0/3.5 later generations, including Aero/Sea secure variants. */
11720 case MPI26_MFGPAGE_DEVID_SAS3216:
11721 case MPI26_MFGPAGE_DEVID_SAS3224:
11722 case MPI26_MFGPAGE_DEVID_SAS3316_1:
11723 case MPI26_MFGPAGE_DEVID_SAS3316_2:
11724 case MPI26_MFGPAGE_DEVID_SAS3316_3:
11725 case MPI26_MFGPAGE_DEVID_SAS3316_4:
11726 case MPI26_MFGPAGE_DEVID_SAS3324_1:
11727 case MPI26_MFGPAGE_DEVID_SAS3324_2:
11728 case MPI26_MFGPAGE_DEVID_SAS3324_3:
11729 case MPI26_MFGPAGE_DEVID_SAS3324_4:
11730 case MPI26_MFGPAGE_DEVID_SAS3508:
11731 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11732 case MPI26_MFGPAGE_DEVID_SAS3408:
11733 case MPI26_MFGPAGE_DEVID_SAS3516:
11734 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11735 case MPI26_MFGPAGE_DEVID_SAS3416:
11736 case MPI26_MFGPAGE_DEVID_SAS3616:
11737 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11738 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11739 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11740 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11741 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
/* Invalid/tampered IDs are still claimed so _scsih_probe can log them. */
11742 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11743 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11744 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11745 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11746 return MPI26_VERSION;
11752 * _scsih_probe - attach and add scsi host
11753 * @pdev: PCI device struct
11754 * @id: pci device id
11756 * Return: 0 success, anything else error.
11759 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
11761 struct MPT3SAS_ADAPTER *ioc;
11762 struct Scsi_Host *shost = NULL;
11764 u16 hba_mpi_version;
11766 /* Determine in which MPI version class this pci device belongs */
11767 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
11768 if (hba_mpi_version == 0)
11771 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
11772 * for other generation HBA's return with -ENODEV
11774 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
11777 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
11778 * for other generation HBA's return with -ENODEV
11780 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
11781 || hba_mpi_version == MPI26_VERSION)))
/* Generation-specific setup: template choice, id counter, driver name. */
11784 switch (hba_mpi_version) {
/* ASPM is disabled on SAS 2.0 parts before host allocation. */
11786 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
11787 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
11788 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
11789 shost = scsi_host_alloc(&mpt2sas_driver_template,
11790 sizeof(struct MPT3SAS_ADAPTER));
11793 ioc = shost_priv(shost);
11794 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11795 ioc->hba_mpi_version_belonged = hba_mpi_version;
11796 ioc->id = mpt2_ids++;
11797 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
11798 switch (pdev->device) {
/* SSS6200 is a WarpDrive: hide IR messages and (maybe) member disks. */
11799 case MPI2_MFGPAGE_DEVID_SSS6200:
11800 ioc->is_warpdrive = 1;
11801 ioc->hide_ir_msg = 1;
11803 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11804 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11805 ioc->is_mcpu_endpoint = 1;
11808 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
/* Multipath on SAS 2.0 defaults off unless forced via module parameter. */
11812 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11813 ioc->multipath_on_hba = 0;
11815 ioc->multipath_on_hba = 1;
11818 case MPI25_VERSION:
11819 case MPI26_VERSION:
11820 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
11821 shost = scsi_host_alloc(&mpt3sas_driver_template,
11822 sizeof(struct MPT3SAS_ADAPTER));
11825 ioc = shost_priv(shost);
11826 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11827 ioc->hba_mpi_version_belonged = hba_mpi_version;
11828 ioc->id = mpt3_ids++;
11829 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
11830 switch (pdev->device) {
11831 case MPI26_MFGPAGE_DEVID_SAS3508:
11832 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11833 case MPI26_MFGPAGE_DEVID_SAS3408:
11834 case MPI26_MFGPAGE_DEVID_SAS3516:
11835 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11836 case MPI26_MFGPAGE_DEVID_SAS3416:
11837 case MPI26_MFGPAGE_DEVID_SAS3616:
11838 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11839 ioc->is_gen35_ioc = 1;
/* Invalid / tampered secure-boot IDs: log loudly, but keep probing. */
11841 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11842 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11843 dev_err(&pdev->dev,
11844 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
11845 pdev->device, pdev->subsystem_vendor,
11846 pdev->subsystem_device);
11848 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11849 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11850 dev_err(&pdev->dev,
11851 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
11852 pdev->device, pdev->subsystem_vendor,
11853 pdev->subsystem_device);
11855 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11856 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11857 dev_info(&pdev->dev,
11858 "HBA is in Configurable Secure mode\n");
11860 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11861 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
/* Aero/Sea parts are both gen3.5 and "aero" class. */
11862 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
11865 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
/*
 * Combined reply queues: MPI 2.5 C0-or-later silicon and all MPI 2.6
 * parts support multiple reply-post host index registers.
 */
11867 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
11868 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
11869 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
11870 ioc->combined_reply_queue = 1;
11871 if (ioc->is_gen35_ioc)
11872 ioc->combined_reply_index_count =
11873 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
11875 ioc->combined_reply_index_count =
11876 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
/* Multipath default differs by generation: on for gen3.5, off otherwise. */
11879 switch (ioc->is_gen35_ioc) {
11881 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11882 ioc->multipath_on_hba = 0;
11884 ioc->multipath_on_hba = 1;
11887 if (multipath_on_hba == -1 || multipath_on_hba > 0)
11888 ioc->multipath_on_hba = 1;
11890 ioc->multipath_on_hba = 0;
/* Register the new adapter on the global list (gioc_lock protects it). */
11900 INIT_LIST_HEAD(&ioc->list);
11901 spin_lock(&gioc_lock);
11902 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
11903 spin_unlock(&gioc_lock);
11904 ioc->shost = shost;
/* Wire up the callback indices registered in scsih_init(). */
11906 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
11907 ioc->tm_cb_idx = tm_cb_idx;
11908 ioc->ctl_cb_idx = ctl_cb_idx;
11909 ioc->base_cb_idx = base_cb_idx;
11910 ioc->port_enable_cb_idx = port_enable_cb_idx;
11911 ioc->transport_cb_idx = transport_cb_idx;
11912 ioc->scsih_cb_idx = scsih_cb_idx;
11913 ioc->config_cb_idx = config_cb_idx;
11914 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
11915 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
11916 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
11917 ioc->logging_level = logging_level;
11918 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
11919 /* Host waits for minimum of six seconds */
11920 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
11922 * Enable MEMORY MOVE support flag.
11924 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
11926 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
11928 /* misc semaphores and spin locks */
11929 mutex_init(&ioc->reset_in_progress_mutex);
11930 /* initializing pci_access_mutex lock */
11931 mutex_init(&ioc->pci_access_mutex);
11932 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
11933 spin_lock_init(&ioc->scsi_lookup_lock);
11934 spin_lock_init(&ioc->sas_device_lock);
11935 spin_lock_init(&ioc->sas_node_lock);
11936 spin_lock_init(&ioc->fw_event_lock);
11937 spin_lock_init(&ioc->raid_device_lock);
11938 spin_lock_init(&ioc->pcie_device_lock);
11939 spin_lock_init(&ioc->diag_trigger_lock);
/* Per-adapter device/event bookkeeping lists. */
11941 INIT_LIST_HEAD(&ioc->sas_device_list);
11942 INIT_LIST_HEAD(&ioc->sas_device_init_list);
11943 INIT_LIST_HEAD(&ioc->sas_expander_list);
11944 INIT_LIST_HEAD(&ioc->enclosure_list);
11945 INIT_LIST_HEAD(&ioc->pcie_device_list);
11946 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
11947 INIT_LIST_HEAD(&ioc->fw_event_list);
11948 INIT_LIST_HEAD(&ioc->raid_device_list);
11949 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
11950 INIT_LIST_HEAD(&ioc->delayed_tr_list);
11951 INIT_LIST_HEAD(&ioc->delayed_sc_list);
11952 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
11953 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
11954 INIT_LIST_HEAD(&ioc->reply_queue_list);
11955 INIT_LIST_HEAD(&ioc->port_table_list);
11957 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
11959 /* init shost parameters */
11960 shost->max_cmd_len = 32;
11961 shost->max_lun = max_lun;
11962 shost->transportt = mpt3sas_transport_template;
11963 shost->unique_id = ioc->id;
11965 if (ioc->is_mcpu_endpoint) {
11966 /* mCPU MPI support 64K max IO */
11967 shost->max_sectors = 128;
11968 ioc_info(ioc, "The max_sectors value is set to %d\n",
11969 shost->max_sectors);
/* Otherwise clamp the max_sectors module parameter into [64, 32767]. */
11971 if (max_sectors != 0xFFFF) {
11972 if (max_sectors < 64) {
11973 shost->max_sectors = 64;
11974 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
11976 } else if (max_sectors > 32767) {
11977 shost->max_sectors = 32767;
11978 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
/* Round down to an even sector count. */
11981 shost->max_sectors = max_sectors & 0xFFFE;
11982 ioc_info(ioc, "The max_sectors value is set to %d\n",
11983 shost->max_sectors);
11987 /* register EEDP capabilities with SCSI layer */
11988 if (prot_mask >= 0)
11989 scsi_host_set_prot(shost, (prot_mask & 0x07));
11991 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
11992 | SHOST_DIF_TYPE2_PROTECTION
11993 | SHOST_DIF_TYPE3_PROTECTION);
11995 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* Ordered workqueue serializes firmware event processing per adapter. */
11998 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
11999 "fw_event_%s%d", ioc->driver_name, ioc->id);
12000 ioc->firmware_event_thread = alloc_ordered_workqueue(
12001 ioc->firmware_event_name, 0);
12002 if (!ioc->firmware_event_thread) {
12003 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12004 __FILE__, __LINE__, __func__);
12006 goto out_thread_fail;
12009 ioc->is_driver_loading = 1;
12010 if ((mpt3sas_base_attach(ioc))) {
12011 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12012 __FILE__, __LINE__, __func__);
12014 goto out_attach_fail;
/* WarpDrive: decide drive visibility from MFG page 10 / volume count. */
12017 if (ioc->is_warpdrive) {
12018 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12019 ioc->hide_drives = 0;
12020 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12021 ioc->hide_drives = 1;
12023 if (mpt3sas_get_num_volumes(ioc))
12024 ioc->hide_drives = 1;
12026 ioc->hide_drives = 0;
12029 ioc->hide_drives = 0;
12031 rv = scsi_add_host(shost, &pdev->dev);
12033 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12034 __FILE__, __LINE__, __func__);
12035 goto out_add_shost_fail;
12038 scsi_scan_host(shost);
12039 mpt3sas_setup_debugfs(ioc);
/* Error unwinding: undo in reverse order of setup (goto-cleanup idiom). */
12041 out_add_shost_fail:
12042 mpt3sas_base_detach(ioc);
12044 destroy_workqueue(ioc->firmware_event_thread);
12046 spin_lock(&gioc_lock);
12047 list_del(&ioc->list);
12048 spin_unlock(&gioc_lock);
12049 scsi_host_put(shost);
12054 * scsih_suspend - power management suspend main entry point
12055 * @dev: Device struct
12057 * Return: 0 success, anything else error.
12059 static int __maybe_unused
12060 scsih_suspend(struct device *dev)
12062 struct pci_dev *pdev = to_pci_dev(dev);
12063 struct Scsi_Host *shost;
12064 struct MPT3SAS_ADAPTER *ioc;
12067 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
/* Quiesce: stop the watchdog, drain work, block new requests, then
 * shut down any NVMe devices before releasing the PCI resources. */
12071 mpt3sas_base_stop_watchdog(ioc);
12072 flush_scheduled_work();
12073 scsi_block_requests(shost);
12074 _scsih_nvme_shutdown(ioc);
12075 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12076 pdev, pci_name(pdev));
12078 mpt3sas_base_free_resources(ioc);
12083 * scsih_resume - power management resume main entry point
12084 * @dev: Device struct
12086 * Return: 0 success, anything else error.
12088 static int __maybe_unused
12089 scsih_resume(struct device *dev)
12091 struct pci_dev *pdev = to_pci_dev(dev);
12092 struct Scsi_Host *shost;
12093 struct MPT3SAS_ADAPTER *ioc;
12094 pci_power_t device_state = pdev->current_state;
12097 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12101 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12102 pdev, pci_name(pdev), device_state);
/* Re-map PCI resources, then bring the IOC back with a soft reset
 * before letting the midlayer resubmit requests. */
12105 r = mpt3sas_base_map_resources(ioc);
12108 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12109 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12110 scsi_unblock_requests(shost);
12111 mpt3sas_base_start_watchdog(ioc);
12116 * scsih_pci_error_detected - Called when a PCI error is detected.
12117 * @pdev: PCI device struct
12118 * @state: PCI channel state
12120 * Description: Called when a PCI error is detected.
12122 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12124 static pci_ers_result_t
12125 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12127 struct Scsi_Host *shost;
12128 struct MPT3SAS_ADAPTER *ioc;
12130 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12131 return PCI_ERS_RESULT_DISCONNECT;
12133 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12136 case pci_channel_io_normal:
/* Channel still usable: let the core try to recover without a reset. */
12137 return PCI_ERS_RESULT_CAN_RECOVER;
12138 case pci_channel_io_frozen:
12139 /* Fatal error, prepare for slot reset */
12140 ioc->pci_error_recovery = 1;
12141 scsi_block_requests(ioc->shost);
12142 mpt3sas_base_stop_watchdog(ioc);
12143 mpt3sas_base_free_resources(ioc);
12144 return PCI_ERS_RESULT_NEED_RESET;
12145 case pci_channel_io_perm_failure:
12146 /* Permanent error, prepare for device removal */
12147 ioc->pci_error_recovery = 1;
12148 mpt3sas_base_stop_watchdog(ioc);
/* Fail all outstanding commands back to the midlayer. */
12149 _scsih_flush_running_cmds(ioc);
12150 return PCI_ERS_RESULT_DISCONNECT;
/* Unknown channel state: request a slot reset as the safe default. */
12152 return PCI_ERS_RESULT_NEED_RESET;
12156 * scsih_pci_slot_reset - Called when PCI slot has been reset.
12157 * @pdev: PCI device struct
12159 * Description: This routine is called by the pci error recovery
12160 * code after the PCI slot has been reset, just before we
12161 * should resume normal operations.
12163 static pci_ers_result_t
12164 scsih_pci_slot_reset(struct pci_dev *pdev)
12166 struct Scsi_Host *shost;
12167 struct MPT3SAS_ADAPTER *ioc;
12170 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12171 return PCI_ERS_RESULT_DISCONNECT;
12173 ioc_info(ioc, "PCI error: slot reset callback!!\n");
/* Recovery window is over: clear the flag so resets are allowed again. */
12175 ioc->pci_error_recovery = 0;
12177 pci_restore_state(pdev);
12178 rc = mpt3sas_base_map_resources(ioc);
12180 return PCI_ERS_RESULT_DISCONNECT;
/* Reinitialize firmware with a full (big-hammer) hard reset. */
12182 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12183 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12185 ioc_warn(ioc, "hard reset: %s\n",
12186 (rc == 0) ? "success" : "failed");
12189 return PCI_ERS_RESULT_RECOVERED;
12191 return PCI_ERS_RESULT_DISCONNECT;
12195 * scsih_pci_resume() - resume normal ops after PCI reset
12196 * @pdev: pointer to PCI device
12198 * Called when the error recovery driver tells us that its
12199 * OK to resume normal operation. Use completion to allow
12200 * halted scsi ops to resume.
12203 scsih_pci_resume(struct pci_dev *pdev)
12205 struct Scsi_Host *shost;
12206 struct MPT3SAS_ADAPTER *ioc;
12208 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12211 ioc_info(ioc, "PCI error: resume callback!!\n");
/* Restart the watchdog and release requests blocked during recovery. */
12213 mpt3sas_base_start_watchdog(ioc);
12214 scsi_unblock_requests(ioc->shost);
12218 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12219 * @pdev: pointer to PCI device
12221 static pci_ers_result_t
12222 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12224 struct Scsi_Host *shost;
12225 struct MPT3SAS_ADAPTER *ioc;
12227 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12228 return PCI_ERS_RESULT_DISCONNECT;
12230 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12232 /* TODO - dump whatever for debugging purposes */
12234 /* This called only if scsih_pci_error_detected returns
12235 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12236 * works, no need to reset slot.
12238 return PCI_ERS_RESULT_RECOVERED;
12242 * scsih_ncq_prio_supp - Check for NCQ command priority support
12243 * @sdev: scsi device struct
12245 * This is called when a user indicates they would like to enable
12246 * ncq command priorities. This works only on SATA devices.
12248 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12250 unsigned char *buf;
12251 bool ncq_prio_supp = false;
/* Without VPD support we cannot read the ATA Information page below. */
12253 if (!scsi_device_supports_vpd(sdev))
12254 return ncq_prio_supp;
12256 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12258 return ncq_prio_supp;
/*
 * VPD page 0x89 (ATA Information) embeds the drive's IDENTIFY data;
 * bit 4 of byte 213 — presumably the SATA NCQ priority capability bit;
 * confirm offset against the SAT/ACS specs.
 */
12260 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12261 ncq_prio_supp = (buf[213] >> 4) & 1;
12264 return ncq_prio_supp;
12267 * The pci device ids are defined in mpi/mpi2_cnfg.h.
/* PCI ID table: one entry per supported controller; codenames in comments. */
12269 static const struct pci_device_id mpt3sas_pci_table[] = {
12270 /* Spitfire ~ 2004 */
12271 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12272 PCI_ANY_ID, PCI_ANY_ID },
12273 /* Falcon ~ 2008 */
12274 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12275 PCI_ANY_ID, PCI_ANY_ID },
12276 /* Liberator ~ 2108 */
12277 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12278 PCI_ANY_ID, PCI_ANY_ID },
12279 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12280 PCI_ANY_ID, PCI_ANY_ID },
12281 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12282 PCI_ANY_ID, PCI_ANY_ID },
12283 /* Meteor ~ 2116 */
12284 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12285 PCI_ANY_ID, PCI_ANY_ID },
12286 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12287 PCI_ANY_ID, PCI_ANY_ID },
12288 /* Thunderbolt ~ 2208 */
12289 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12290 PCI_ANY_ID, PCI_ANY_ID },
12291 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12292 PCI_ANY_ID, PCI_ANY_ID },
12293 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12294 PCI_ANY_ID, PCI_ANY_ID },
12295 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12296 PCI_ANY_ID, PCI_ANY_ID },
12297 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12298 PCI_ANY_ID, PCI_ANY_ID },
12299 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12300 PCI_ANY_ID, PCI_ANY_ID },
12301 /* Mustang ~ 2308 */
12302 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12303 PCI_ANY_ID, PCI_ANY_ID },
12304 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12305 PCI_ANY_ID, PCI_ANY_ID },
12306 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12307 PCI_ANY_ID, PCI_ANY_ID },
12308 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12309 PCI_ANY_ID, PCI_ANY_ID },
12310 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12311 PCI_ANY_ID, PCI_ANY_ID },
/* SSS6200 WarpDrive. */
12313 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12314 PCI_ANY_ID, PCI_ANY_ID },
12315 /* Fury ~ 3004 and 3008 */
12316 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12317 PCI_ANY_ID, PCI_ANY_ID },
12318 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12319 PCI_ANY_ID, PCI_ANY_ID },
12320 /* Invader ~ 3108 */
12321 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12322 PCI_ANY_ID, PCI_ANY_ID },
12323 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12324 PCI_ANY_ID, PCI_ANY_ID },
12325 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12326 PCI_ANY_ID, PCI_ANY_ID },
12327 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12328 PCI_ANY_ID, PCI_ANY_ID },
12329 /* Cutlass ~ 3216 and 3224 */
12330 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12331 PCI_ANY_ID, PCI_ANY_ID },
12332 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12333 PCI_ANY_ID, PCI_ANY_ID },
12334 /* Intruder ~ 3316 and 3324 */
12335 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12336 PCI_ANY_ID, PCI_ANY_ID },
12337 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12338 PCI_ANY_ID, PCI_ANY_ID },
12339 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12340 PCI_ANY_ID, PCI_ANY_ID },
12341 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12342 PCI_ANY_ID, PCI_ANY_ID },
12343 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12344 PCI_ANY_ID, PCI_ANY_ID },
12345 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12346 PCI_ANY_ID, PCI_ANY_ID },
12347 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12348 PCI_ANY_ID, PCI_ANY_ID },
12349 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12350 PCI_ANY_ID, PCI_ANY_ID },
12351 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12352 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12353 PCI_ANY_ID, PCI_ANY_ID },
12354 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12355 PCI_ANY_ID, PCI_ANY_ID },
12356 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12357 PCI_ANY_ID, PCI_ANY_ID },
12358 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12359 PCI_ANY_ID, PCI_ANY_ID },
12360 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12361 PCI_ANY_ID, PCI_ANY_ID },
12362 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12363 PCI_ANY_ID, PCI_ANY_ID },
12364 /* Mercator ~ 3616*/
12365 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12366 PCI_ANY_ID, PCI_ANY_ID },
12368 /* Aero SI 0x00E1 Configurable Secure
12369 * 0x00E2 Hard Secure
12371 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12372 PCI_ANY_ID, PCI_ANY_ID },
12373 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12374 PCI_ANY_ID, PCI_ANY_ID },
12377 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
12379 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12380 PCI_ANY_ID, PCI_ANY_ID },
12381 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12382 PCI_ANY_ID, PCI_ANY_ID },
12384 /* Atlas PCIe Switch Management Port */
12385 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12386 PCI_ANY_ID, PCI_ANY_ID },
12388 /* Sea SI 0x00E5 Configurable Secure
12389 * 0x00E6 Hard Secure
12391 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12392 PCI_ANY_ID, PCI_ANY_ID },
12393 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12394 PCI_ANY_ID, PCI_ANY_ID },
12397 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
12399 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12400 PCI_ANY_ID, PCI_ANY_ID },
12401 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12402 PCI_ANY_ID, PCI_ANY_ID },
12404 {0} /* Terminating entry */
12406 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
/* AER recovery callbacks: detected -> (mmio_enabled | slot_reset) -> resume. */
12408 static struct pci_error_handlers _mpt3sas_err_handler = {
12409 .error_detected = scsih_pci_error_detected,
12410 .mmio_enabled = scsih_pci_mmio_enabled,
12411 .slot_reset = scsih_pci_slot_reset,
12412 .resume = scsih_pci_resume,
/* System suspend/resume hooks for the PCI driver below. */
12415 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
/* PCI driver glue: probe/remove/shutdown plus error handling and PM ops. */
12417 static struct pci_driver mpt3sas_driver = {
12418 .name = MPT3SAS_DRIVER_NAME,
12419 .id_table = mpt3sas_pci_table,
12420 .probe = _scsih_probe,
12421 .remove = scsih_remove,
12422 .shutdown = scsih_shutdown,
12423 .err_handler = &_mpt3sas_err_handler,
12424 .driver.pm = &scsih_pm_ops,
12428 * scsih_init - main entry point for this driver.
12430 * Return: 0 success, anything else error.
/* Registers one completion-callback index per command class; the indices
 * are stored in module globals and copied into each ioc at probe time. */
12438 mpt3sas_base_initialize_callback_handler();
12440 /* queuecommand callback hander */
12441 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12443 /* task management callback handler */
12444 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12446 /* base internal commands callback handler */
12447 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12448 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12449 mpt3sas_port_enable_done);
12451 /* transport internal commands callback handler */
12452 transport_cb_idx = mpt3sas_base_register_callback_handler(
12453 mpt3sas_transport_done);
12455 /* scsih internal commands callback handler */
12456 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12458 /* configuration page API internal commands callback handler */
12459 config_cb_idx = mpt3sas_base_register_callback_handler(
12460 mpt3sas_config_done);
12462 /* ctl module callback handler */
12463 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
/* Target-reset / volume-target-reset / SAS-control completion handlers. */
12465 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12466 _scsih_tm_tr_complete);
12468 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12469 _scsih_tm_volume_tr_complete);
12471 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12472 _scsih_sas_control_complete);
12474 mpt3sas_init_debugfs();
12479 * scsih_exit - exit point for this driver (when it is a module).
12481 * Return: 0 success, anything else error.
/* Release every callback index registered in scsih_init(). */
12487 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12488 mpt3sas_base_release_callback_handler(tm_cb_idx);
12489 mpt3sas_base_release_callback_handler(base_cb_idx);
12490 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12491 mpt3sas_base_release_callback_handler(transport_cb_idx);
12492 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12493 mpt3sas_base_release_callback_handler(config_cb_idx);
12494 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12496 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12497 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12498 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12500 /* raid transport support */
/* Mirror of _mpt3sas_init(): only release templates that were attached
 * for the enabled HBA generations (see hbas_to_enumerate). */
12501 if (hbas_to_enumerate != 1)
12502 raid_class_release(mpt3sas_raid_template);
12503 if (hbas_to_enumerate != 2)
12504 raid_class_release(mpt2sas_raid_template);
12505 sas_release_transport(mpt3sas_transport_template);
12506 mpt3sas_exit_debugfs();
12510 * _mpt3sas_init - main entry point for this driver.
12512 * Return: 0 success, anything else error.
12515 _mpt3sas_init(void)
12519 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12520 MPT3SAS_DRIVER_VERSION);
/* Attach the SAS transport class shared by both HBA generations. */
12522 mpt3sas_transport_template =
12523 sas_attach_transport(&mpt3sas_transport_functions);
12524 if (!mpt3sas_transport_template)
12527 /* No need attach mpt3sas raid functions template
12528 * if hbas_to_enumarate value is one.
12530 if (hbas_to_enumerate != 1) {
12531 mpt3sas_raid_template =
12532 raid_class_attach(&mpt3sas_raid_functions);
12533 if (!mpt3sas_raid_template) {
/* Unwind the transport attach on failure. */
12534 sas_release_transport(mpt3sas_transport_template);
12539 /* No need to attach mpt2sas raid functions template
12540 * if hbas_to_enumarate value is two
12542 if (hbas_to_enumerate != 2) {
12543 mpt2sas_raid_template =
12544 raid_class_attach(&mpt2sas_raid_functions);
12545 if (!mpt2sas_raid_template) {
12546 sas_release_transport(mpt3sas_transport_template);
/* Register callback handlers, then the ctl module and the PCI driver. */
12551 error = scsih_init();
12557 mpt3sas_ctl_init(hbas_to_enumerate);
12559 error = pci_register_driver(&mpt3sas_driver);
12567 * _mpt3sas_exit - exit point for this driver (when it is a module).
12571 _mpt3sas_exit(void)
12573 pr_info("mpt3sas version %s unloading\n",
12574 MPT3SAS_DRIVER_VERSION);
/* Tear down in reverse order of _mpt3sas_init(): ctl first, then PCI. */
12576 mpt3sas_ctl_exit(hbas_to_enumerate);
12578 pci_unregister_driver(&mpt3sas_driver);
12583 module_init(_mpt3sas_init);
12584 module_exit(_mpt3sas_exit);