2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6 * Copyright (C) 2012-2014 LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/kdev_t.h>
54 #include <linux/blkdev.h>
55 #include <linux/delay.h>
56 #include <linux/interrupt.h>
57 #include <linux/dma-mapping.h>
59 #include <linux/time.h>
60 #include <linux/ktime.h>
61 #include <linux/kthread.h>
62 #include <asm/page.h> /* To get host page size per arch */
63 #include <linux/aer.h>
66 #include "mpt3sas_base.h"
68 static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
71 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
73 /* maximum controller queue depth */
74 #define MAX_HBA_QUEUE_DEPTH 30000
75 #define MAX_CHAIN_DEPTH 100000
76 static int max_queue_depth = -1;
77 module_param(max_queue_depth, int, 0444);
78 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
80 static int max_sgl_entries = -1;
81 module_param(max_sgl_entries, int, 0444);
82 MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
84 static int msix_disable = -1;
85 module_param(msix_disable, int, 0444);
86 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
88 static int smp_affinity_enable = 1;
89 module_param(smp_affinity_enable, int, 0444);
90 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
92 static int max_msix_vectors = -1;
93 module_param(max_msix_vectors, int, 0444);
94 MODULE_PARM_DESC(max_msix_vectors,
97 static int irqpoll_weight = -1;
98 module_param(irqpoll_weight, int, 0444);
99 MODULE_PARM_DESC(irqpoll_weight,
100 "irq poll weight (default= one fourth of HBA queue depth)");
102 static int mpt3sas_fwfault_debug;
103 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
104 " enable detection of firmware fault and halt firmware - (default=0)");
106 static int perf_mode = -1;
107 module_param(perf_mode, int, 0444);
108 MODULE_PARM_DESC(perf_mode,
109 "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
110 "0 - balanced: high iops mode is enabled &\n\t\t"
111 "interrupt coalescing is enabled only on high iops queues,\n\t\t"
112 "1 - iops: high iops mode is disabled &\n\t\t"
113 "interrupt coalescing is enabled on all queues,\n\t\t"
114 "2 - latency: high iops mode is disabled &\n\t\t"
115 "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
116 "\t\tdefault - default perf_mode is 'balanced'"
119 enum mpt3sas_perf_mode {
120 MPT_PERF_MODE_DEFAULT = -1,
121 MPT_PERF_MODE_BALANCED = 0,
122 MPT_PERF_MODE_IOPS = 1,
123 MPT_PERF_MODE_LATENCY = 2,
127 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
128 u32 ioc_state, int timeout);
130 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
132 _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
135 * mpt3sas_base_check_cmd_timeout - Function
136 * to check timeout and command termination due
139 * @ioc: per adapter object.
140 * @status: Status of issued command.
141 * @mpi_request:mf request pointer.
142 * @sz: size of buffer.
144 * Return: 1/0 Reset to be done or Not
147 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
148 u8 status, void *mpi_request, int sz)
152 if (!(status & MPT3_CMD_RESET))
155 ioc_err(ioc, "Command %s\n",
156 issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
157 _debug_dump_mf(mpi_request, sz);
163 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
170 _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
172 int ret = param_set_int(val, kp);
173 struct MPT3SAS_ADAPTER *ioc;
178 /* global ioc spinlock to protect controller list on list operations */
179 pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
180 spin_lock(&gioc_lock);
181 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
182 ioc->fwfault_debug = mpt3sas_fwfault_debug;
183 spin_unlock(&gioc_lock);
186 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
187 param_get_int, &mpt3sas_fwfault_debug, 0644);
190 * _base_readl_aero - retry readl for max three times.
191 * @addr: MPT Fusion system interface register address
193 * Retry the readl() for max three times if it gets zero value
194 * while reading the system interface register.
197 _base_readl_aero(const volatile void __iomem *addr)
202 ret_val = readl(addr);
204 } while (ret_val == 0 && i < 3);
210 _base_readl(const volatile void __iomem *addr)
216 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
219 * @ioc: per adapter object
220 * @reply: reply message frame(lower 32bit addr)
221 * @index: System request message index.
224 _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
228 * 256 is offset within sys register.
229 * 256 offset MPI frame starts. Max MPI frame supported is 32.
230 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
232 u16 cmd_credit = ioc->facts.RequestCredit + 1;
233 void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
234 MPI_FRAME_START_OFFSET +
235 (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
237 writel(reply, reply_free_iomem);
241 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
242 * to system/BAR0 region.
244 * @dst_iomem: Pointer to the destination location in BAR0 space.
245 * @src: Pointer to the Source data.
246 * @size: Size of data to be copied.
249 _base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
252 u32 *src_virt_mem = (u32 *)src;
254 for (i = 0; i < size/4; i++)
255 writel((u32)src_virt_mem[i],
256 (void __iomem *)dst_iomem + (i * 4));
260 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
262 * @dst_iomem: Pointer to the destination location in BAR0 space.
263 * @src: Pointer to the Source data.
264 * @size: Size of data to be copied.
267 _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
270 u32 *src_virt_mem = (u32 *)(src);
272 for (i = 0; i < size/4; i++)
273 writel((u32)src_virt_mem[i],
274 (void __iomem *)dst_iomem + (i * 4));
278 * _base_get_chain - Calculates and Returns virtual chain address
279 * for the provided smid in BAR0 space.
281 * @ioc: per adapter object
282 * @smid: system request message index
283 * @sge_chain_count: Scatter gather chain count.
285 * Return: the chain address.
287 static inline void __iomem*
288 _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
291 void __iomem *base_chain, *chain_virt;
292 u16 cmd_credit = ioc->facts.RequestCredit + 1;
294 base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
295 (cmd_credit * ioc->request_sz) +
296 REPLY_FREE_POOL_SIZE;
297 chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
298 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
303 * _base_get_chain_phys - Calculates and Returns physical address
304 * in BAR0 for scatter gather chains, for
307 * @ioc: per adapter object
308 * @smid: system request message index
309 * @sge_chain_count: Scatter gather chain count.
311 * Return: Physical chain address.
313 static inline phys_addr_t
314 _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
317 phys_addr_t base_chain_phys, chain_phys;
318 u16 cmd_credit = ioc->facts.RequestCredit + 1;
320 base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
321 (cmd_credit * ioc->request_sz) +
322 REPLY_FREE_POOL_SIZE;
323 chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
324 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
329 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
330 * buffer address for the provided smid.
331 * (Each smid can have 64K starts from 17024)
333 * @ioc: per adapter object
334 * @smid: system request message index
336 * Return: Pointer to buffer location in BAR0.
339 static void __iomem *
340 _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
342 u16 cmd_credit = ioc->facts.RequestCredit + 1;
343 // Added extra 1 to reach end of chain.
344 void __iomem *chain_end = _base_get_chain(ioc,
346 ioc->facts.MaxChainDepth);
347 return chain_end + (smid * 64 * 1024);
351 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
352 * Host buffer Physical address for the provided smid.
353 * (Each smid can have 64K starts from 17024)
355 * @ioc: per adapter object
356 * @smid: system request message index
358 * Return: Pointer to buffer location in BAR0.
361 _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
363 u16 cmd_credit = ioc->facts.RequestCredit + 1;
364 phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
366 ioc->facts.MaxChainDepth);
367 return chain_end_phys + (smid * 64 * 1024);
371 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
372 * lookup list and Provides chain_buffer
373 * address for the matching dma address.
374 * (Each smid can have 64K starts from 17024)
376 * @ioc: per adapter object
377 * @chain_buffer_dma: Chain buffer dma address.
379 * Return: Pointer to chain buffer. Or Null on Failure.
382 _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
383 dma_addr_t chain_buffer_dma)
386 struct chain_tracker *ct;
388 for (index = 0; index < ioc->scsiio_depth; index++) {
389 for (j = 0; j < ioc->chains_needed_per_io; j++) {
390 ct = &ioc->chain_lookup[index].chains_per_smid[j];
391 if (ct && ct->chain_buffer_dma == chain_buffer_dma)
392 return ct->chain_buffer;
395 ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
400 * _clone_sg_entries - MPI EP's scsiio and config requests
401 * are handled here. Base function for
402 * double buffering, before submitting
405 * @ioc: per adapter object.
406 * @mpi_request: mf request pointer.
407 * @smid: system request message index.
409 static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
410 void *mpi_request, u16 smid)
412 Mpi2SGESimple32_t *sgel, *sgel_next;
413 u32 sgl_flags, sge_chain_count = 0;
414 bool is_write = false;
416 void __iomem *buffer_iomem;
417 phys_addr_t buffer_iomem_phys;
418 void __iomem *buff_ptr;
419 phys_addr_t buff_ptr_phys;
420 void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
421 void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
422 phys_addr_t dst_addr_phys;
423 MPI2RequestHeader_t *request_hdr;
424 struct scsi_cmnd *scmd;
425 struct scatterlist *sg_scmd = NULL;
426 int is_scsiio_req = 0;
428 request_hdr = (MPI2RequestHeader_t *) mpi_request;
430 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
431 Mpi25SCSIIORequest_t *scsiio_request =
432 (Mpi25SCSIIORequest_t *)mpi_request;
433 sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
435 } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
436 Mpi2ConfigRequest_t *config_req =
437 (Mpi2ConfigRequest_t *)mpi_request;
438 sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
442 /* From smid we can get scsi_cmd, once we have sg_scmd,
443 * we just need to get sg_virt and sg_next to get virtual
444 * address associated with sgel->Address.
448 /* Get scsi_cmd using smid */
449 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
451 ioc_err(ioc, "scmd is NULL\n");
455 /* Get sg_scmd from scmd provided */
456 sg_scmd = scsi_sglist(scmd);
460 * 0 - 255 System register
461 * 256 - 4352 MPI Frame. (This is based on maxCredit 32)
462 * 4352 - 4864 Reply_free pool (512 byte is reserved
463 * considering maxCredit 32. Reply need extra
464 * room, for mCPU case kept four times of
466 * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
467 * 128 byte size = 12288)
468 * 17152 - x Host buffer mapped with smid.
469 * (Each smid can have 64K Max IO.)
470 * BAR0+Last 1K MSIX Addr and Data
471 * Total size in use 2113664 bytes of 4MB BAR0
474 buffer_iomem = _base_get_buffer_bar0(ioc, smid);
475 buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
477 buff_ptr = buffer_iomem;
478 buff_ptr_phys = buffer_iomem_phys;
479 WARN_ON(buff_ptr_phys > U32_MAX);
481 if (le32_to_cpu(sgel->FlagsLength) &
482 (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
485 for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
488 (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
490 switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
491 case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
493 * Helper function which on passing
494 * chain_buffer_dma returns chain_buffer. Get
495 * the virtual address for sgel->Address
498 _base_get_chain_buffer_dma_to_chain_buffer(ioc,
499 le32_to_cpu(sgel->Address));
500 if (sgel_next == NULL)
503 * This is coping 128 byte chain
504 * frame (not a host buffer)
506 dst_chain_addr[sge_chain_count] =
508 smid, sge_chain_count);
509 src_chain_addr[sge_chain_count] =
511 dst_addr_phys = _base_get_chain_phys(ioc,
512 smid, sge_chain_count);
513 WARN_ON(dst_addr_phys > U32_MAX);
515 cpu_to_le32(lower_32_bits(dst_addr_phys));
519 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
522 _base_clone_to_sys_mem(buff_ptr,
524 (le32_to_cpu(sgel->FlagsLength) &
527 * FIXME: this relies on a a zero
531 cpu_to_le32((u32)buff_ptr_phys);
533 _base_clone_to_sys_mem(buff_ptr,
535 (le32_to_cpu(sgel->FlagsLength) &
538 cpu_to_le32((u32)buff_ptr_phys);
541 buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
543 buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
545 if ((le32_to_cpu(sgel->FlagsLength) &
546 (MPI2_SGE_FLAGS_END_OF_BUFFER
547 << MPI2_SGE_FLAGS_SHIFT)))
548 goto eob_clone_chain;
551 * Every single element in MPT will have
552 * associated sg_next. Better to sanity that
553 * sg_next is not NULL, but it will be a bug
557 sg_scmd = sg_next(sg_scmd);
561 goto eob_clone_chain;
569 for (i = 0; i < sge_chain_count; i++) {
571 _base_clone_to_sys_mem(dst_chain_addr[i],
572 src_chain_addr[i], ioc->request_sz);
577 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
578 * @arg: input argument, used to derive ioc
581 * 0 if controller is removed from pci subsystem.
584 static int mpt3sas_remove_dead_ioc_func(void *arg)
586 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
587 struct pci_dev *pdev;
595 pci_stop_and_remove_bus_device_locked(pdev);
600 * _base_sync_drv_fw_timestamp - Sync Drive-Fw TimeStamp.
601 * @ioc: Per Adapter Object
605 static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
607 Mpi26IoUnitControlRequest_t *mpi_request;
608 Mpi26IoUnitControlReply_t *mpi_reply;
610 ktime_t current_time;
614 mutex_lock(&ioc->scsih_cmds.mutex);
615 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
616 ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
619 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
620 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
622 ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
623 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
626 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
627 ioc->scsih_cmds.smid = smid;
628 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
629 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
630 mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
631 mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
632 current_time = ktime_get_real();
633 TimeStamp = ktime_to_ms(current_time);
634 mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
635 mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32);
636 init_completion(&ioc->scsih_cmds.done);
637 ioc->put_smid_default(ioc, smid);
638 dinitprintk(ioc, ioc_info(ioc,
639 "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
641 wait_for_completion_timeout(&ioc->scsih_cmds.done,
642 MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
643 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
644 mpt3sas_check_cmd_timeout(ioc,
645 ioc->scsih_cmds.status, mpi_request,
646 sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
647 goto issue_host_reset;
649 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
650 mpi_reply = ioc->scsih_cmds.reply;
651 dinitprintk(ioc, ioc_info(ioc,
652 "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
653 le16_to_cpu(mpi_reply->IOCStatus),
654 le32_to_cpu(mpi_reply->IOCLogInfo)));
658 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
659 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
661 mutex_unlock(&ioc->scsih_cmds.mutex);
665 * _base_fault_reset_work - workq handling ioc fault conditions
666 * @work: input argument, used to derive ioc
671 _base_fault_reset_work(struct work_struct *work)
673 struct MPT3SAS_ADAPTER *ioc =
674 container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
678 struct task_struct *p;
681 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
682 if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
683 ioc->pci_error_recovery)
685 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
687 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
688 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
689 ioc_err(ioc, "SAS host is non-operational !!!!\n");
691 /* It may be possible that EEH recovery can resolve some of
692 * pci bus failure issues rather removing the dead ioc function
693 * by considering controller is in a non-operational state. So
694 * here priority is given to the EEH recovery. If it doesn't
695 * not resolve this issue, mpt3sas driver will consider this
696 * controller to non-operational state and remove the dead ioc
699 if (ioc->non_operational_loop++ < 5) {
700 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
706 * Call _scsih_flush_pending_cmds callback so that we flush all
707 * pending commands back to OS. This call is required to avoid
708 * deadlock at block layer. Dead IOC will fail to do diag reset,
709 * and this call is safe since dead ioc will never return any
710 * command back from HW.
712 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
714 * Set remove_host flag early since kernel thread will
715 * take some time to execute.
717 ioc->remove_host = 1;
718 /*Remove the Dead Host */
719 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
720 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
722 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
725 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
727 return; /* don't rearm timer */
730 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
731 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
732 ioc->manu_pg11.CoreDumpTOSec :
733 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
735 timeout /= (FAULT_POLLING_INTERVAL/1000);
737 if (ioc->ioc_coredump_loop == 0) {
738 mpt3sas_print_coredump_info(ioc,
739 doorbell & MPI2_DOORBELL_DATA_MASK);
740 /* do not accept any IOs and disable the interrupts */
742 &ioc->ioc_reset_in_progress_lock, flags);
743 ioc->shost_recovery = 1;
744 spin_unlock_irqrestore(
745 &ioc->ioc_reset_in_progress_lock, flags);
746 mpt3sas_base_mask_interrupts(ioc);
747 _base_clear_outstanding_commands(ioc);
750 ioc_info(ioc, "%s: CoreDump loop %d.",
751 __func__, ioc->ioc_coredump_loop);
753 /* Wait until CoreDump completes or times out */
754 if (ioc->ioc_coredump_loop++ < timeout) {
756 &ioc->ioc_reset_in_progress_lock, flags);
761 if (ioc->ioc_coredump_loop) {
762 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
763 ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
764 __func__, ioc->ioc_coredump_loop);
766 ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
767 __func__, ioc->ioc_coredump_loop);
768 ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
770 ioc->non_operational_loop = 0;
771 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
772 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
773 ioc_warn(ioc, "%s: hard reset: %s\n",
774 __func__, rc == 0 ? "success" : "failed");
775 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
776 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
777 mpt3sas_print_fault_code(ioc, doorbell &
778 MPI2_DOORBELL_DATA_MASK);
779 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
780 MPI2_IOC_STATE_COREDUMP)
781 mpt3sas_print_coredump_info(ioc, doorbell &
782 MPI2_DOORBELL_DATA_MASK);
783 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
784 MPI2_IOC_STATE_OPERATIONAL)
785 return; /* don't rearm timer */
787 ioc->ioc_coredump_loop = 0;
788 if (ioc->time_sync_interval &&
789 ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
790 ioc->timestamp_update_count = 0;
791 _base_sync_drv_fw_timestamp(ioc);
793 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
795 if (ioc->fault_reset_work_q)
796 queue_delayed_work(ioc->fault_reset_work_q,
797 &ioc->fault_reset_work,
798 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
799 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
803 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
804 * @ioc: per adapter object
809 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
813 if (ioc->fault_reset_work_q)
816 ioc->timestamp_update_count = 0;
817 /* initialize fault polling */
819 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
820 snprintf(ioc->fault_reset_work_q_name,
821 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
822 ioc->driver_name, ioc->id);
823 ioc->fault_reset_work_q =
824 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
825 if (!ioc->fault_reset_work_q) {
826 ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
829 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
830 if (ioc->fault_reset_work_q)
831 queue_delayed_work(ioc->fault_reset_work_q,
832 &ioc->fault_reset_work,
833 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
834 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
838 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
839 * @ioc: per adapter object
844 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
847 struct workqueue_struct *wq;
849 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
850 wq = ioc->fault_reset_work_q;
851 ioc->fault_reset_work_q = NULL;
852 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
854 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
856 destroy_workqueue(wq);
861 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
862 * @ioc: per adapter object
863 * @fault_code: fault code
866 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
868 ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
872 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
873 * @ioc: per adapter object
874 * @fault_code: fault code
879 mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
881 ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
885 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
886 * completes or times out
887 * @ioc: per adapter object
888 * @caller: caller function name
890 * Return: 0 for success, non-zero for failure.
893 mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
896 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
897 ioc->manu_pg11.CoreDumpTOSec :
898 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
900 int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
905 "%s: CoreDump timed out. (ioc_state=0x%x)\n",
909 "%s: CoreDump completed. (ioc_state=0x%x)\n",
916 * mpt3sas_halt_firmware - halt's mpt controller firmware
917 * @ioc: per adapter object
919 * For debugging timeout related issues. Writing 0xCOFFEE00
920 * to the doorbell register will halt controller firmware. With
921 * the purpose to stop both driver and firmware, the enduser can
922 * obtain a ring buffer from controller UART.
925 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
929 if (!ioc->fwfault_debug)
934 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
935 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
936 mpt3sas_print_fault_code(ioc, doorbell &
937 MPI2_DOORBELL_DATA_MASK);
938 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
939 MPI2_IOC_STATE_COREDUMP) {
940 mpt3sas_print_coredump_info(ioc, doorbell &
941 MPI2_DOORBELL_DATA_MASK);
943 writel(0xC0FFEE00, &ioc->chip->Doorbell);
944 ioc_err(ioc, "Firmware is halted due to command timeout\n");
947 if (ioc->fwfault_debug == 2)
951 panic("panic in %s\n", __func__);
955 * _base_sas_ioc_info - verbose translation of the ioc status
956 * @ioc: per adapter object
957 * @mpi_reply: reply mf payload returned from firmware
958 * @request_hdr: request mf
961 _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
962 MPI2RequestHeader_t *request_hdr)
964 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
968 char *func_str = NULL;
970 /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
971 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
972 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
973 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
976 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
979 * Older Firmware version doesn't support driver trigger pages.
980 * So, skip displaying 'config invalid type' type
983 if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
984 Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;
986 if ((rqst->ExtPageType ==
987 MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
988 !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
993 switch (ioc_status) {
995 /****************************************************************************
996 * Common IOCStatus values for all replies
997 ****************************************************************************/
999 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1000 desc = "invalid function";
1002 case MPI2_IOCSTATUS_BUSY:
1005 case MPI2_IOCSTATUS_INVALID_SGL:
1006 desc = "invalid sgl";
1008 case MPI2_IOCSTATUS_INTERNAL_ERROR:
1009 desc = "internal error";
1011 case MPI2_IOCSTATUS_INVALID_VPID:
1012 desc = "invalid vpid";
1014 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
1015 desc = "insufficient resources";
1017 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
1018 desc = "insufficient power";
1020 case MPI2_IOCSTATUS_INVALID_FIELD:
1021 desc = "invalid field";
1023 case MPI2_IOCSTATUS_INVALID_STATE:
1024 desc = "invalid state";
1026 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
1027 desc = "op state not supported";
1030 /****************************************************************************
1031 * Config IOCStatus values
1032 ****************************************************************************/
1034 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
1035 desc = "config invalid action";
1037 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
1038 desc = "config invalid type";
1040 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
1041 desc = "config invalid page";
1043 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
1044 desc = "config invalid data";
1046 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
1047 desc = "config no defaults";
1049 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
1050 desc = "config cant commit";
1053 /****************************************************************************
1055 ****************************************************************************/
1057 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1058 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1059 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1060 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1061 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1062 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1063 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1064 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1065 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1066 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1067 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1068 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1071 /****************************************************************************
1072 * For use by SCSI Initiator and SCSI Target end-to-end data protection
1073 ****************************************************************************/
1075 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1076 desc = "eedp guard error";
1078 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1079 desc = "eedp ref tag error";
1081 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1082 desc = "eedp app tag error";
1085 /****************************************************************************
1086 * SCSI Target values
1087 ****************************************************************************/
1089 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
1090 desc = "target invalid io index";
1092 case MPI2_IOCSTATUS_TARGET_ABORTED:
1093 desc = "target aborted";
1095 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
1096 desc = "target no conn retryable";
1098 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
1099 desc = "target no connection";
1101 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
1102 desc = "target xfer count mismatch";
1104 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
1105 desc = "target data offset error";
1107 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
1108 desc = "target too much write data";
1110 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
1111 desc = "target iu too short";
1113 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
1114 desc = "target ack nak timeout";
1116 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
1117 desc = "target nak received";
1120 /****************************************************************************
1121 * Serial Attached SCSI values
1122 ****************************************************************************/
1124 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
1125 desc = "smp request failed";
1127 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
1128 desc = "smp data overrun";
1131 /****************************************************************************
1132 * Diagnostic Buffer Post / Diagnostic Release values
1133 ****************************************************************************/
1135 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
1136 desc = "diagnostic released";
1145 switch (request_hdr->Function) {
1146 case MPI2_FUNCTION_CONFIG:
1147 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
1148 func_str = "config_page";
1150 case MPI2_FUNCTION_SCSI_TASK_MGMT:
1151 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
1152 func_str = "task_mgmt";
1154 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
1155 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
1156 func_str = "sas_iounit_ctl";
1158 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1159 frame_sz = sizeof(Mpi2SepRequest_t);
1160 func_str = "enclosure";
1162 case MPI2_FUNCTION_IOC_INIT:
1163 frame_sz = sizeof(Mpi2IOCInitRequest_t);
1164 func_str = "ioc_init";
1166 case MPI2_FUNCTION_PORT_ENABLE:
1167 frame_sz = sizeof(Mpi2PortEnableRequest_t);
1168 func_str = "port_enable";
1170 case MPI2_FUNCTION_SMP_PASSTHROUGH:
1171 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
1172 func_str = "smp_passthru";
1174 case MPI2_FUNCTION_NVME_ENCAPSULATED:
1175 frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
1177 func_str = "nvme_encapsulated";
1181 func_str = "unknown";
1185 ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
1186 desc, ioc_status, request_hdr, func_str);
1188 _debug_dump_mf(request_hdr, frame_sz/4);
1192 * _base_display_event_data - verbose translation of firmware asyn events
1193 * @ioc: per adapter object
1194 * @mpi_reply: reply mf payload returned from firmware
1197 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1198 Mpi2EventNotificationReply_t *mpi_reply)
1203 if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
1206 event = le16_to_cpu(mpi_reply->Event);
1209 case MPI2_EVENT_LOG_DATA:
1212 case MPI2_EVENT_STATE_CHANGE:
1213 desc = "Status Change";
1215 case MPI2_EVENT_HARD_RESET_RECEIVED:
1216 desc = "Hard Reset Received";
1218 case MPI2_EVENT_EVENT_CHANGE:
1219 desc = "Event Change";
1221 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
1222 desc = "Device Status Change";
1224 case MPI2_EVENT_IR_OPERATION_STATUS:
1225 if (!ioc->hide_ir_msg)
1226 desc = "IR Operation Status";
1228 case MPI2_EVENT_SAS_DISCOVERY:
1230 Mpi2EventDataSasDiscovery_t *event_data =
1231 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
1232 ioc_info(ioc, "Discovery: (%s)",
1233 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
1235 if (event_data->DiscoveryStatus)
1236 pr_cont(" discovery_status(0x%08x)",
1237 le32_to_cpu(event_data->DiscoveryStatus));
1241 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
1242 desc = "SAS Broadcast Primitive";
1244 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
1245 desc = "SAS Init Device Status Change";
1247 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
1248 desc = "SAS Init Table Overflow";
1250 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1251 desc = "SAS Topology Change List";
1253 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
1254 desc = "SAS Enclosure Device Status Change";
1256 case MPI2_EVENT_IR_VOLUME:
1257 if (!ioc->hide_ir_msg)
1260 case MPI2_EVENT_IR_PHYSICAL_DISK:
1261 if (!ioc->hide_ir_msg)
1262 desc = "IR Physical Disk";
1264 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
1265 if (!ioc->hide_ir_msg)
1266 desc = "IR Configuration Change List";
1268 case MPI2_EVENT_LOG_ENTRY_ADDED:
1269 if (!ioc->hide_ir_msg)
1270 desc = "Log Entry Added";
1272 case MPI2_EVENT_TEMP_THRESHOLD:
1273 desc = "Temperature Threshold";
1275 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
1276 desc = "Cable Event";
1278 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1279 desc = "SAS Device Discovery Error";
1281 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
1282 desc = "PCIE Device Status Change";
1284 case MPI2_EVENT_PCIE_ENUMERATION:
1286 Mpi26EventDataPCIeEnumeration_t *event_data =
1287 (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
1288 ioc_info(ioc, "PCIE Enumeration: (%s)",
1289 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
1291 if (event_data->EnumerationStatus)
1292 pr_cont("enumeration_status(0x%08x)",
1293 le32_to_cpu(event_data->EnumerationStatus));
1297 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1298 desc = "PCIE Topology Change List";
1305 ioc_info(ioc, "%s\n", desc);
1309 * _base_sas_log_info - verbose translation of firmware log info
1310 * @ioc: per adapter object
1311 * @log_info: log info
1314 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
1316 union loginfo_type {
1325 union loginfo_type sas_loginfo;
1326 char *originator_str = NULL;
1328 sas_loginfo.loginfo = log_info;
1329 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
1332 /* each nexus loss loginfo */
1333 if (log_info == 0x31170000)
1336 /* eat the loginfos associated with task aborts */
1337 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
1338 0x31140000 || log_info == 0x31130000))
1341 switch (sas_loginfo.dw.originator) {
1343 originator_str = "IOP";
1346 originator_str = "PL";
1349 if (!ioc->hide_ir_msg)
1350 originator_str = "IR";
1352 originator_str = "WarpDrive";
1356 ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1358 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
1362 * _base_display_reply_info - handle reply descriptors depending on IOC Status
1363 * @ioc: per adapter object
1364 * @smid: system request message index
1365 * @msix_index: MSIX table index supplied by the OS
1366 * @reply: reply message frame (lower 32bit addr)
1369 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1372 MPI2DefaultReply_t *mpi_reply;
1376 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1377 if (unlikely(!mpi_reply)) {
1378 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
1379 __FILE__, __LINE__, __func__);
1382 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
1384 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
1385 (ioc->logging_level & MPT_DEBUG_REPLY)) {
1386 _base_sas_ioc_info(ioc , mpi_reply,
1387 mpt3sas_base_get_msg_frame(ioc, smid));
1390 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
1391 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
1392 _base_sas_log_info(ioc, loginfo);
1395 if (ioc_status || loginfo) {
1396 ioc_status &= MPI2_IOCSTATUS_MASK;
1397 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1402 * mpt3sas_base_done - base internal command completion routine
1403 * @ioc: per adapter object
1404 * @smid: system request message index
1405 * @msix_index: MSIX table index supplied by the OS
1406 * @reply: reply message frame(lower 32bit addr)
1409 * 1 meaning mf should be freed from _base_interrupt
1410 * 0 means the mf is freed from this function.
1413 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1416 MPI2DefaultReply_t *mpi_reply;
1418 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1419 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
1420 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
1422 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
1425 ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
1427 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
1428 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
1430 ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
1432 complete(&ioc->base_cmds.done);
1437 * _base_async_event - main callback handler for firmware asyn events
1438 * @ioc: per adapter object
1439 * @msix_index: MSIX table index supplied by the OS
1440 * @reply: reply message frame(lower 32bit addr)
1443 * 1 meaning mf should be freed from _base_interrupt
1444 * 0 means the mf is freed from this function.
1447 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1449 Mpi2EventNotificationReply_t *mpi_reply;
1450 Mpi2EventAckRequest_t *ack_request;
1452 struct _event_ack_list *delayed_event_ack;
1454 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1457 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
1460 _base_display_event_data(ioc, mpi_reply);
1462 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
1464 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1466 delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
1468 if (!delayed_event_ack)
1470 INIT_LIST_HEAD(&delayed_event_ack->list);
1471 delayed_event_ack->Event = mpi_reply->Event;
1472 delayed_event_ack->EventContext = mpi_reply->EventContext;
1473 list_add_tail(&delayed_event_ack->list,
1474 &ioc->delayed_event_ack_list);
1476 ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1477 le16_to_cpu(mpi_reply->Event)));
1481 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1482 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1483 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1484 ack_request->Event = mpi_reply->Event;
1485 ack_request->EventContext = mpi_reply->EventContext;
1486 ack_request->VF_ID = 0; /* TODO */
1487 ack_request->VP_ID = 0;
1488 ioc->put_smid_default(ioc, smid);
1492 /* scsih callback handler */
1493 mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1495 /* ctl callback handler */
1496 mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1501 static struct scsiio_tracker *
1502 _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1504 struct scsi_cmnd *cmd;
1506 if (WARN_ON(!smid) ||
1507 WARN_ON(smid >= ioc->hi_priority_smid))
1510 cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1512 return scsi_cmd_priv(cmd);
1518 * _base_get_cb_idx - obtain the callback index
1519 * @ioc: per adapter object
1520 * @smid: system request message index
1522 * Return: callback index.
1525 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1528 u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1531 if (smid < ioc->hi_priority_smid) {
1532 struct scsiio_tracker *st;
1534 if (smid < ctl_smid) {
1535 st = _get_st_from_smid(ioc, smid);
1537 cb_idx = st->cb_idx;
1538 } else if (smid == ctl_smid)
1539 cb_idx = ioc->ctl_cb_idx;
1540 } else if (smid < ioc->internal_smid) {
1541 i = smid - ioc->hi_priority_smid;
1542 cb_idx = ioc->hpr_lookup[i].cb_idx;
1543 } else if (smid <= ioc->hba_queue_depth) {
1544 i = smid - ioc->internal_smid;
1545 cb_idx = ioc->internal_lookup[i].cb_idx;
1551 * mpt3sas_base_mask_interrupts - disable interrupts
1552 * @ioc: per adapter object
1554 * Disabling ResetIRQ, Reply and Doorbell Interrupts
1557 mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1561 ioc->mask_interrupts = 1;
1562 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1563 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1564 writel(him_register, &ioc->chip->HostInterruptMask);
1565 ioc->base_readl(&ioc->chip->HostInterruptMask);
1569 * mpt3sas_base_unmask_interrupts - enable interrupts
1570 * @ioc: per adapter object
1572 * Enabling only Reply Interrupts
1575 mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1579 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1580 him_register &= ~MPI2_HIM_RIM;
1581 writel(him_register, &ioc->chip->HostInterruptMask);
1582 ioc->mask_interrupts = 0;
1585 union reply_descriptor {
1593 static u32 base_mod64(u64 dividend, u32 divisor)
1598 pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
1599 remainder = do_div(dividend, divisor);
1604 * _base_process_reply_queue - Process reply descriptors from reply
1605 * descriptor post queue.
1606 * @reply_q: per IRQ's reply queue object.
1608 * Return: number of reply descriptors processed from reply
1612 _base_process_reply_queue(struct adapter_reply_queue *reply_q)
1614 union reply_descriptor rd;
1616 u8 request_descript_type;
1620 u8 msix_index = reply_q->msix_index;
1621 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1622 Mpi2ReplyDescriptorsUnion_t *rpf;
1626 if (!atomic_add_unless(&reply_q->busy, 1, 1))
1627 return completed_cmds;
1629 rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1630 request_descript_type = rpf->Default.ReplyFlags
1631 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1632 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1633 atomic_dec(&reply_q->busy);
1634 return completed_cmds;
1639 rd.word = le64_to_cpu(rpf->Words);
1640 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1643 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1644 if (request_descript_type ==
1645 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1646 request_descript_type ==
1647 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1648 request_descript_type ==
1649 MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1650 cb_idx = _base_get_cb_idx(ioc, smid);
1651 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1652 (likely(mpt_callbacks[cb_idx] != NULL))) {
1653 rc = mpt_callbacks[cb_idx](ioc, smid,
1656 mpt3sas_base_free_smid(ioc, smid);
1658 } else if (request_descript_type ==
1659 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1660 reply = le32_to_cpu(
1661 rpf->AddressReply.ReplyFrameAddress);
1662 if (reply > ioc->reply_dma_max_address ||
1663 reply < ioc->reply_dma_min_address)
1666 cb_idx = _base_get_cb_idx(ioc, smid);
1667 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1668 (likely(mpt_callbacks[cb_idx] != NULL))) {
1669 rc = mpt_callbacks[cb_idx](ioc, smid,
1672 _base_display_reply_info(ioc,
1673 smid, msix_index, reply);
1675 mpt3sas_base_free_smid(ioc,
1679 _base_async_event(ioc, msix_index, reply);
1682 /* reply free queue handling */
1684 ioc->reply_free_host_index =
1685 (ioc->reply_free_host_index ==
1686 (ioc->reply_free_queue_depth - 1)) ?
1687 0 : ioc->reply_free_host_index + 1;
1688 ioc->reply_free[ioc->reply_free_host_index] =
1690 if (ioc->is_mcpu_endpoint)
1691 _base_clone_reply_to_sys_mem(ioc,
1693 ioc->reply_free_host_index);
1694 writel(ioc->reply_free_host_index,
1695 &ioc->chip->ReplyFreeHostIndex);
1699 rpf->Words = cpu_to_le64(ULLONG_MAX);
1700 reply_q->reply_post_host_index =
1701 (reply_q->reply_post_host_index ==
1702 (ioc->reply_post_queue_depth - 1)) ? 0 :
1703 reply_q->reply_post_host_index + 1;
1704 request_descript_type =
1705 reply_q->reply_post_free[reply_q->reply_post_host_index].
1706 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1708 /* Update the reply post host index after continuously
1709 * processing the threshold number of Reply Descriptors.
1710 * So that FW can find enough entries to post the Reply
1711 * Descriptors in the reply descriptor post queue.
1713 if (completed_cmds >= ioc->thresh_hold) {
1714 if (ioc->combined_reply_queue) {
1715 writel(reply_q->reply_post_host_index |
1716 ((msix_index & 7) <<
1717 MPI2_RPHI_MSIX_INDEX_SHIFT),
1718 ioc->replyPostRegisterIndex[msix_index/8]);
1720 writel(reply_q->reply_post_host_index |
1722 MPI2_RPHI_MSIX_INDEX_SHIFT),
1723 &ioc->chip->ReplyPostHostIndex);
1725 if (!reply_q->irq_poll_scheduled) {
1726 reply_q->irq_poll_scheduled = true;
1727 irq_poll_sched(&reply_q->irqpoll);
1729 atomic_dec(&reply_q->busy);
1730 return completed_cmds;
1732 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1734 if (!reply_q->reply_post_host_index)
1735 rpf = reply_q->reply_post_free;
1742 if (!completed_cmds) {
1743 atomic_dec(&reply_q->busy);
1744 return completed_cmds;
1747 if (ioc->is_warpdrive) {
1748 writel(reply_q->reply_post_host_index,
1749 ioc->reply_post_host_index[msix_index]);
1750 atomic_dec(&reply_q->busy);
1751 return completed_cmds;
1754 /* Update Reply Post Host Index.
1755 * For those HBA's which support combined reply queue feature
1756 * 1. Get the correct Supplemental Reply Post Host Index Register.
1757 * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1758 * Index Register address bank i.e replyPostRegisterIndex[],
1759 * 2. Then update this register with new reply host index value
1760 * in ReplyPostIndex field and the MSIxIndex field with
1761 * msix_index value reduced to a value between 0 and 7,
1762 * using a modulo 8 operation. Since each Supplemental Reply Post
1763 * Host Index Register supports 8 MSI-X vectors.
1765 * For other HBA's just update the Reply Post Host Index register with
1766 * new reply host index value in ReplyPostIndex Field and msix_index
1767 * value in MSIxIndex field.
1769 if (ioc->combined_reply_queue)
1770 writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
1771 MPI2_RPHI_MSIX_INDEX_SHIFT),
1772 ioc->replyPostRegisterIndex[msix_index/8]);
1774 writel(reply_q->reply_post_host_index | (msix_index <<
1775 MPI2_RPHI_MSIX_INDEX_SHIFT),
1776 &ioc->chip->ReplyPostHostIndex);
1777 atomic_dec(&reply_q->busy);
1778 return completed_cmds;
1782 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1783 * @irq: irq number (not used)
1784 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
1786 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1789 _base_interrupt(int irq, void *bus_id)
1791 struct adapter_reply_queue *reply_q = bus_id;
1792 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1794 if (ioc->mask_interrupts)
1796 if (reply_q->irq_poll_scheduled)
1798 return ((_base_process_reply_queue(reply_q) > 0) ?
1799 IRQ_HANDLED : IRQ_NONE);
1803 * _base_irqpoll - IRQ poll callback handler
1804 * @irqpoll: irq_poll object
1805 * @budget: irq poll weight
1807 * Return: number of reply descriptors processed
1810 _base_irqpoll(struct irq_poll *irqpoll, int budget)
1812 struct adapter_reply_queue *reply_q;
1813 int num_entries = 0;
1815 reply_q = container_of(irqpoll, struct adapter_reply_queue,
1817 if (reply_q->irq_line_enable) {
1818 disable_irq_nosync(reply_q->os_irq);
1819 reply_q->irq_line_enable = false;
1821 num_entries = _base_process_reply_queue(reply_q);
1822 if (num_entries < budget) {
1823 irq_poll_complete(irqpoll);
1824 reply_q->irq_poll_scheduled = false;
1825 reply_q->irq_line_enable = true;
1826 enable_irq(reply_q->os_irq);
1828 * Go for one more round of processing the
1829 * reply descriptor post queue in case the HBA
1830 * Firmware has posted some reply descriptors
1831 * while reenabling the IRQ.
1833 _base_process_reply_queue(reply_q);
1840 * _base_init_irqpolls - initliaze IRQ polls
1841 * @ioc: per adapter object
1846 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1848 struct adapter_reply_queue *reply_q, *next;
1850 if (list_empty(&ioc->reply_queue_list))
1853 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1854 irq_poll_init(&reply_q->irqpoll,
1855 ioc->hba_queue_depth/4, _base_irqpoll);
1856 reply_q->irq_poll_scheduled = false;
1857 reply_q->irq_line_enable = true;
1858 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1859 reply_q->msix_index);
1864 * _base_is_controller_msix_enabled - is controller support muli-reply queues
1865 * @ioc: per adapter object
1867 * Return: Whether or not MSI/X is enabled.
1870 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1872 return (ioc->facts.IOCCapabilities &
1873 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1877 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1878 * @ioc: per adapter object
1879 * @poll: poll over reply descriptor pools incase interrupt for
1880 * timed-out SCSI command got delayed
1881 * Context: non-ISR context
1883 * Called when a Task Management request has completed.
1886 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
1888 struct adapter_reply_queue *reply_q;
1890 /* If MSIX capability is turned off
1891 * then multi-queues are not enabled
1893 if (!_base_is_controller_msix_enabled(ioc))
1896 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1897 if (ioc->shost_recovery || ioc->remove_host ||
1898 ioc->pci_error_recovery)
1900 /* TMs are on msix_index == 0 */
1901 if (reply_q->msix_index == 0)
1903 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1904 if (reply_q->irq_poll_scheduled) {
1905 /* Calling irq_poll_disable will wait for any pending
1906 * callbacks to have completed.
1908 irq_poll_disable(&reply_q->irqpoll);
1909 irq_poll_enable(&reply_q->irqpoll);
1910 /* check how the scheduled poll has ended,
1911 * clean up only if necessary
1913 if (reply_q->irq_poll_scheduled) {
1914 reply_q->irq_poll_scheduled = false;
1915 reply_q->irq_line_enable = true;
1916 enable_irq(reply_q->os_irq);
1921 _base_process_reply_queue(reply_q);
1925 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1926 * @cb_idx: callback index
1929 mpt3sas_base_release_callback_handler(u8 cb_idx)
1931 mpt_callbacks[cb_idx] = NULL;
1935 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1936 * @cb_func: callback function
1938 * Return: Index of @cb_func.
1941 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1945 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1946 if (mpt_callbacks[cb_idx] == NULL)
1949 mpt_callbacks[cb_idx] = cb_func;
1954 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1957 mpt3sas_base_initialize_callback_handler(void)
1961 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1962 mpt3sas_base_release_callback_handler(cb_idx);
1967 * _base_build_zero_len_sge - build zero length sg entry
1968 * @ioc: per adapter object
1969 * @paddr: virtual address for SGE
1971 * Create a zero length scatter gather entry to insure the IOCs hardware has
1972 * something to use if the target device goes brain dead and tries
1973 * to send data even when none is asked for.
1976 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1978 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1979 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1980 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1981 MPI2_SGE_FLAGS_SHIFT);
1982 ioc->base_add_sg_single(paddr, flags_length, -1);
1986 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1987 * @paddr: virtual address for SGE
1988 * @flags_length: SGE flags and data transfer length
1989 * @dma_addr: Physical address
1992 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1994 Mpi2SGESimple32_t *sgel = paddr;
1996 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1997 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1998 sgel->FlagsLength = cpu_to_le32(flags_length);
1999 sgel->Address = cpu_to_le32(dma_addr);
2004 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
2005 * @paddr: virtual address for SGE
2006 * @flags_length: SGE flags and data transfer length
2007 * @dma_addr: Physical address
2010 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
2012 Mpi2SGESimple64_t *sgel = paddr;
2014 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
2015 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
2016 sgel->FlagsLength = cpu_to_le32(flags_length);
2017 sgel->Address = cpu_to_le64(dma_addr);
2021 * _base_get_chain_buffer_tracker - obtain chain tracker
2022 * @ioc: per adapter object
2023 * @scmd: SCSI commands of the IO request
2025 * Return: chain tracker from chain_lookup table using key as
2026 * smid and smid's chain_offset.
2028 static struct chain_tracker *
2029 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
2030 struct scsi_cmnd *scmd)
2032 struct chain_tracker *chain_req;
2033 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2034 u16 smid = st->smid;
2036 atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
2038 if (chain_offset == ioc->chains_needed_per_io)
2041 chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
2042 atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
2048 * _base_build_sg - build generic sg
2049 * @ioc: per adapter object
2050 * @psge: virtual address for SGE
2051 * @data_out_dma: physical address for WRITES
2052 * @data_out_sz: data xfer size for WRITES
2053 * @data_in_dma: physical address for READS
2054 * @data_in_sz: data xfer size for READS
2057 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
2058 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2063 if (!data_out_sz && !data_in_sz) {
2064 _base_build_zero_len_sge(ioc, psge);
2068 if (data_out_sz && data_in_sz) {
2069 /* WRITE sgel first */
2070 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2071 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
2072 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2073 ioc->base_add_sg_single(psge, sgl_flags |
2074 data_out_sz, data_out_dma);
2077 psge += ioc->sge_size;
2079 /* READ sgel last */
2080 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2081 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2082 MPI2_SGE_FLAGS_END_OF_LIST);
2083 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2084 ioc->base_add_sg_single(psge, sgl_flags |
2085 data_in_sz, data_in_dma);
2086 } else if (data_out_sz) /* WRITE */ {
2087 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2088 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2089 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
2090 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2091 ioc->base_add_sg_single(psge, sgl_flags |
2092 data_out_sz, data_out_dma);
2093 } else if (data_in_sz) /* READ */ {
2094 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2095 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2096 MPI2_SGE_FLAGS_END_OF_LIST);
2097 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2098 ioc->base_add_sg_single(psge, sgl_flags |
2099 data_in_sz, data_in_dma);
2103 /* IEEE format sgls */
2106 * _base_build_nvme_prp - This function is called for NVMe end devices to build
2107 * a native SGL (NVMe PRP).
2108 * @ioc: per adapter object
2109 * @smid: system request message index for getting asscociated SGL
2110 * @nvme_encap_request: the NVMe request msg frame pointer
2111 * @data_out_dma: physical address for WRITES
2112 * @data_out_sz: data xfer size for WRITES
2113 * @data_in_dma: physical address for READS
2114 * @data_in_sz: data xfer size for READS
2116 * The native SGL is built starting in the first PRP
2117 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
2118 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
2119 * used to describe a larger data buffer. If the data buffer is too large to
2120 * describe using the two PRP entriess inside the NVMe message, then PRP1
2121 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2122 * list located elsewhere in memory to describe the remaining data memory
2123 * segments. The PRP list will be contiguous.
2125 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
2126 * consists of a list of PRP entries to describe a number of noncontigous
2127 * physical memory segments as a single memory buffer, just as a SGL does. Note
2128 * however, that this function is only used by the IOCTL call, so the memory
2129 * given will be guaranteed to be contiguous. There is no need to translate
2130 * non-contiguous SGL into a PRP in this case. All PRPs will describe
2131 * contiguous space that is one page size each.
2133 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
2134 * a PRP list pointer or a PRP element, depending upon the command. PRP2
2135 * contains the second PRP element if the memory being described fits within 2
2136 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2138 * A PRP list pointer contains the address of a PRP list, structured as a linear
2139 * array of PRP entries. Each PRP entry in this list describes a segment of
2142 * Each 64-bit PRP entry comprises an address and an offset field. The address
2143 * always points at the beginning of a 4KB physical memory page, and the offset
2144 * describes where within that 4KB page the memory segment begins. Only the
2145 * first element in a PRP list may contain a non-zero offset, implying that all
2146 * memory segments following the first begin at the start of a 4KB page.
2148 * Each PRP element normally describes 4KB of physical memory, with exceptions
2149 * for the first and last elements in the list. If the memory being described
2150 * by the list begins at a non-zero offset within the first 4KB page, then the
2151 * first PRP element will contain a non-zero offset indicating where the region
2152 * begins within the 4KB page. The last memory segment may end before the end
2153 * of the 4KB segment, depending upon the overall size of the memory being
2154 * described by the PRP list.
2156 * Since PRP entries lack any indication of size, the overall data buffer length
2157 * is used to determine where the end of the data memory buffer is located, and
2158 * how many PRP entries are required to describe it.
2161 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2162 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2163 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2166 int prp_size = NVME_PRP_SIZE;
2167 __le64 *prp_entry, *prp1_entry, *prp2_entry;
2169 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
2170 u32 offset, entry_len;
2171 u32 page_mask_result, page_mask;
2173 struct mpt3sas_nvme_cmd *nvme_cmd =
2174 (void *)nvme_encap_request->NVMe_Command;
2177 * Not all commands require a data transfer. If no data, just return
2178 * without constructing any PRP.
2180 if (!data_in_sz && !data_out_sz)
2182 prp1_entry = &nvme_cmd->prp1;
2183 prp2_entry = &nvme_cmd->prp2;
2184 prp_entry = prp1_entry;
2186 * For the PRP entries, use the specially allocated buffer of
2187 * contiguous memory.
2189 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2190 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2193 * Check if we are within 1 entry of a page boundary we don't
2194 * want our first entry to be a PRP List entry.
2196 page_mask = ioc->page_size - 1;
2197 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2198 if (!page_mask_result) {
2199 /* Bump up to next page boundary. */
2200 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2201 prp_page_dma = prp_page_dma + prp_size;
2205 * Set PRP physical pointer, which initially points to the current PRP
2208 prp_entry_dma = prp_page_dma;
2210 /* Get physical address and length of the data buffer. */
2212 dma_addr = data_in_dma;
2213 length = data_in_sz;
2215 dma_addr = data_out_dma;
2216 length = data_out_sz;
2219 /* Loop while the length is not zero. */
2222 * Check if we need to put a list pointer here if we are at
2223 * page boundary - prp_size (8 bytes).
2225 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2226 if (!page_mask_result) {
2228 * This is the last entry in a PRP List, so we need to
2229 * put a PRP list pointer here. What this does is:
2230 * - bump the current memory pointer to the next
2231 * address, which will be the next full page.
2232 * - set the PRP Entry to point to that page. This
2233 * is now the PRP List pointer.
2234 * - bump the PRP Entry pointer the start of the
2235 * next page. Since all of this PRP memory is
2236 * contiguous, no need to get a new page - it's
2237 * just the next address.
2240 *prp_entry = cpu_to_le64(prp_entry_dma);
2244 /* Need to handle if entry will be part of a page. */
2245 offset = dma_addr & page_mask;
2246 entry_len = ioc->page_size - offset;
2248 if (prp_entry == prp1_entry) {
2250 * Must fill in the first PRP pointer (PRP1) before
2253 *prp1_entry = cpu_to_le64(dma_addr);
2256 * Now point to the second PRP entry within the
2259 prp_entry = prp2_entry;
2260 } else if (prp_entry == prp2_entry) {
2262 * Should the PRP2 entry be a PRP List pointer or just
2263 * a regular PRP pointer? If there is more than one
2264 * more page of data, must use a PRP List pointer.
2266 if (length > ioc->page_size) {
2268 * PRP2 will contain a PRP List pointer because
2269 * more PRP's are needed with this command. The
2270 * list will start at the beginning of the
2271 * contiguous buffer.
2273 *prp2_entry = cpu_to_le64(prp_entry_dma);
2276 * The next PRP Entry will be the start of the
2279 prp_entry = prp_page;
2282 * After this, the PRP Entries are complete.
2283 * This command uses 2 PRP's and no PRP list.
2285 *prp2_entry = cpu_to_le64(dma_addr);
2289 * Put entry in list and bump the addresses.
2291 * After PRP1 and PRP2 are filled in, this will fill in
2292 * all remaining PRP entries in a PRP List, one per
2293 * each time through the loop.
2295 *prp_entry = cpu_to_le64(dma_addr);
2301 * Bump the phys address of the command's data buffer by the
2304 dma_addr += entry_len;
2306 /* Decrement length accounting for last partial page. */
2307 if (entry_len > length)
2310 length -= entry_len;
2315 * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
2316 * SGLs specific to NVMe drives only
2318 * @ioc: per adapter object
2319 * @scmd: SCSI command from the mid-layer
2320 * @mpi_request: mpi request
2322 * @sge_count: scatter gather element count.
2324 * Return: true: PRPs are built
2325 * false: IEEE SGLs needs to be built
2328 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2329 struct scsi_cmnd *scmd,
2330 Mpi25SCSIIORequest_t *mpi_request,
2331 u16 smid, int sge_count)
2333 int sge_len, num_prp_in_chain = 0;
2334 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2336 dma_addr_t msg_dma, sge_addr, offset;
2337 u32 page_mask, page_mask_result;
2338 struct scatterlist *sg_scmd;
2340 int data_len = scsi_bufflen(scmd);
2343 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2345 * Nvme has a very convoluted prp format. One prp is required
2346 * for each page or partial page. Driver need to split up OS sg_list
2347 * entries if it is longer than one page or cross a page
2348 * boundary. Driver also have to insert a PRP list pointer entry as
2349 * the last entry in each physical page of the PRP list.
2351 * NOTE: The first PRP "entry" is actually placed in the first
2352 * SGL entry in the main message as IEEE 64 format. The 2nd
2353 * entry in the main message is the chain element, and the rest
2354 * of the PRP entries are built in the contiguous pcie buffer.
2356 page_mask = nvme_pg_size - 1;
2359 * Native SGL is needed.
2360 * Put a chain element in main message frame that points to the first
2363 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
2367 /* Set main message chain element pointer */
2368 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2370 * For NVMe the chain element needs to be the 2nd SG entry in the main
2373 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2374 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2377 * For the PRP entries, use the specially allocated buffer of
2378 * contiguous memory. Normal chain buffers can't be used
2379 * because each chain buffer would need to be the size of an OS
2382 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2383 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2385 main_chain_element->Address = cpu_to_le64(msg_dma);
2386 main_chain_element->NextChainOffset = 0;
2387 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2388 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2389 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2391 /* Build first prp, sge need not to be page aligned*/
2392 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2393 sg_scmd = scsi_sglist(scmd);
2394 sge_addr = sg_dma_address(sg_scmd);
2395 sge_len = sg_dma_len(sg_scmd);
2397 offset = sge_addr & page_mask;
2398 first_prp_len = nvme_pg_size - offset;
2400 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2401 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2403 data_len -= first_prp_len;
2405 if (sge_len > first_prp_len) {
2406 sge_addr += first_prp_len;
2407 sge_len -= first_prp_len;
2408 } else if (data_len && (sge_len == first_prp_len)) {
2409 sg_scmd = sg_next(sg_scmd);
2410 sge_addr = sg_dma_address(sg_scmd);
2411 sge_len = sg_dma_len(sg_scmd);
2415 offset = sge_addr & page_mask;
2417 /* Put PRP pointer due to page boundary*/
2418 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2419 if (unlikely(!page_mask_result)) {
2420 scmd_printk(KERN_NOTICE,
2421 scmd, "page boundary curr_buff: 0x%p\n",
2424 *curr_buff = cpu_to_le64(msg_dma);
2429 *curr_buff = cpu_to_le64(sge_addr);
2434 sge_addr += nvme_pg_size;
2435 sge_len -= nvme_pg_size;
2436 data_len -= nvme_pg_size;
2444 sg_scmd = sg_next(sg_scmd);
2445 sge_addr = sg_dma_address(sg_scmd);
2446 sge_len = sg_dma_len(sg_scmd);
2449 main_chain_element->Length =
2450 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2455 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2456 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2458 u32 data_length = 0;
2459 bool build_prp = true;
2461 data_length = scsi_bufflen(scmd);
2463 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2468 /* If Datalenth is <= 16K and number of SGE’s entries are <= 2
2471 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2478 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2479 * determine if the driver needs to build a native SGL. If so, that native
2480 * SGL is built in the special contiguous buffers allocated especially for
2481 * PCIe SGL creation. If the driver will not build a native SGL, return
2482 * TRUE and a normal IEEE SGL will be built. Currently this routine
2484 * @ioc: per adapter object
2485 * @mpi_request: mf request pointer
2486 * @smid: system request message index
2487 * @scmd: scsi command
2488 * @pcie_device: points to the PCIe device's info
2490 * Return: 0 if native SGL was built, 1 if no SGL was built
2493 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2494 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2495 struct _pcie_device *pcie_device)
2499 /* Get the SG list pointer and info. */
2500 sges_left = scsi_dma_map(scmd);
2501 if (sges_left < 0) {
2502 sdev_printk(KERN_ERR, scmd->device,
2503 "scsi_dma_map failed: request for %d bytes!\n",
2504 scsi_bufflen(scmd));
2508 /* Check if we need to build a native SG list. */
2509 if (!base_is_prp_possible(ioc, pcie_device,
2511 /* We built a native SG list, just return. */
2516 * Build native NVMe PRP.
2518 base_make_prp_nvme(ioc, scmd, mpi_request,
2523 scsi_dma_unmap(scmd);
2528 * _base_add_sg_single_ieee - add sg element for IEEE format
2529 * @paddr: virtual address for SGE
2531 * @chain_offset: number of 128 byte elements from start of segment
2532 * @length: data transfer length
2533 * @dma_addr: Physical address
2536 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2537 dma_addr_t dma_addr)
2539 Mpi25IeeeSgeChain64_t *sgel = paddr;
2541 sgel->Flags = flags;
2542 sgel->NextChainOffset = chain_offset;
2543 sgel->Length = cpu_to_le32(length);
2544 sgel->Address = cpu_to_le64(dma_addr);
2548 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2549 * @ioc: per adapter object
2550 * @paddr: virtual address for SGE
2552 * Create a zero length scatter gather entry to insure the IOCs hardware has
2553 * something to use if the target device goes brain dead and tries
2554 * to send data even when none is asked for.
2557 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2559 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2560 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2561 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2563 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2567 * _base_build_sg_scmd - main sg creation routine
2568 * pcie_device is unused here!
2569 * @ioc: per adapter object
2570 * @scmd: scsi command
2571 * @smid: system request message index
2572 * @unused: unused pcie_device pointer
2575 * The main routine that builds scatter gather table from a given
2576 * scsi request sent via the .queuecommand main handler.
2578 * Return: 0 success, anything else error
2581 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2582 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2584 Mpi2SCSIIORequest_t *mpi_request;
2585 dma_addr_t chain_dma;
2586 struct scatterlist *sg_scmd;
2587 void *sg_local, *chain;
2592 u32 sges_in_segment;
2594 u32 sgl_flags_last_element;
2595 u32 sgl_flags_end_buffer;
2596 struct chain_tracker *chain_req;
2598 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2600 /* init scatter gather flags */
2601 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2602 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2603 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2604 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2605 << MPI2_SGE_FLAGS_SHIFT;
2606 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2607 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2608 << MPI2_SGE_FLAGS_SHIFT;
2609 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2611 sg_scmd = scsi_sglist(scmd);
2612 sges_left = scsi_dma_map(scmd);
2613 if (sges_left < 0) {
2614 sdev_printk(KERN_ERR, scmd->device,
2615 "scsi_dma_map failed: request for %d bytes!\n",
2616 scsi_bufflen(scmd));
2620 sg_local = &mpi_request->SGL;
2621 sges_in_segment = ioc->max_sges_in_main_message;
2622 if (sges_left <= sges_in_segment)
2623 goto fill_in_last_segment;
2625 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2626 (sges_in_segment * ioc->sge_size))/4;
2628 /* fill in main message segment when there is a chain following */
2629 while (sges_in_segment) {
2630 if (sges_in_segment == 1)
2631 ioc->base_add_sg_single(sg_local,
2632 sgl_flags_last_element | sg_dma_len(sg_scmd),
2633 sg_dma_address(sg_scmd));
2635 ioc->base_add_sg_single(sg_local, sgl_flags |
2636 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2637 sg_scmd = sg_next(sg_scmd);
2638 sg_local += ioc->sge_size;
2643 /* initializing the chain flags and pointers */
2644 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2645 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2648 chain = chain_req->chain_buffer;
2649 chain_dma = chain_req->chain_buffer_dma;
2651 sges_in_segment = (sges_left <=
2652 ioc->max_sges_in_chain_message) ? sges_left :
2653 ioc->max_sges_in_chain_message;
2654 chain_offset = (sges_left == sges_in_segment) ?
2655 0 : (sges_in_segment * ioc->sge_size)/4;
2656 chain_length = sges_in_segment * ioc->sge_size;
2658 chain_offset = chain_offset <<
2659 MPI2_SGE_CHAIN_OFFSET_SHIFT;
2660 chain_length += ioc->sge_size;
2662 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2663 chain_length, chain_dma);
2666 goto fill_in_last_segment;
2668 /* fill in chain segments */
2669 while (sges_in_segment) {
2670 if (sges_in_segment == 1)
2671 ioc->base_add_sg_single(sg_local,
2672 sgl_flags_last_element |
2673 sg_dma_len(sg_scmd),
2674 sg_dma_address(sg_scmd));
2676 ioc->base_add_sg_single(sg_local, sgl_flags |
2677 sg_dma_len(sg_scmd),
2678 sg_dma_address(sg_scmd));
2679 sg_scmd = sg_next(sg_scmd);
2680 sg_local += ioc->sge_size;
2685 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2688 chain = chain_req->chain_buffer;
2689 chain_dma = chain_req->chain_buffer_dma;
2693 fill_in_last_segment:
2695 /* fill the last segment */
2698 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2699 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2701 ioc->base_add_sg_single(sg_local, sgl_flags |
2702 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2703 sg_scmd = sg_next(sg_scmd);
2704 sg_local += ioc->sge_size;
2712 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2713 * @ioc: per adapter object
2714 * @scmd: scsi command
2715 * @smid: system request message index
2716 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2717 * constructed on need.
2720 * The main routine that builds scatter gather table from a given
2721 * scsi request sent via the .queuecommand main handler.
2723 * Return: 0 success, anything else error
2726 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2727 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2729 Mpi25SCSIIORequest_t *mpi_request;
2730 dma_addr_t chain_dma;
2731 struct scatterlist *sg_scmd;
2732 void *sg_local, *chain;
2736 u32 sges_in_segment;
2737 u8 simple_sgl_flags;
2738 u8 simple_sgl_flags_last;
2740 struct chain_tracker *chain_req;
2742 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2744 /* init scatter gather flags */
2745 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2746 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2747 simple_sgl_flags_last = simple_sgl_flags |
2748 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2749 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2750 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2752 /* Check if we need to build a native SG list. */
2753 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2754 smid, scmd, pcie_device) == 0)) {
2755 /* We built a native SG list, just return. */
2759 sg_scmd = scsi_sglist(scmd);
2760 sges_left = scsi_dma_map(scmd);
2761 if (sges_left < 0) {
2762 sdev_printk(KERN_ERR, scmd->device,
2763 "scsi_dma_map failed: request for %d bytes!\n",
2764 scsi_bufflen(scmd));
2768 sg_local = &mpi_request->SGL;
2769 sges_in_segment = (ioc->request_sz -
2770 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2771 if (sges_left <= sges_in_segment)
2772 goto fill_in_last_segment;
2774 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2775 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2777 /* fill in main message segment when there is a chain following */
2778 while (sges_in_segment > 1) {
2779 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2780 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2781 sg_scmd = sg_next(sg_scmd);
2782 sg_local += ioc->sge_size_ieee;
2787 /* initializing the pointers */
2788 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2791 chain = chain_req->chain_buffer;
2792 chain_dma = chain_req->chain_buffer_dma;
2794 sges_in_segment = (sges_left <=
2795 ioc->max_sges_in_chain_message) ? sges_left :
2796 ioc->max_sges_in_chain_message;
2797 chain_offset = (sges_left == sges_in_segment) ?
2798 0 : sges_in_segment;
2799 chain_length = sges_in_segment * ioc->sge_size_ieee;
2801 chain_length += ioc->sge_size_ieee;
2802 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2803 chain_offset, chain_length, chain_dma);
2807 goto fill_in_last_segment;
2809 /* fill in chain segments */
2810 while (sges_in_segment) {
2811 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2812 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2813 sg_scmd = sg_next(sg_scmd);
2814 sg_local += ioc->sge_size_ieee;
2819 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2822 chain = chain_req->chain_buffer;
2823 chain_dma = chain_req->chain_buffer_dma;
2827 fill_in_last_segment:
2829 /* fill the last segment */
2830 while (sges_left > 0) {
2832 _base_add_sg_single_ieee(sg_local,
2833 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2834 sg_dma_address(sg_scmd));
2836 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2837 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2838 sg_scmd = sg_next(sg_scmd);
2839 sg_local += ioc->sge_size_ieee;
2847 * _base_build_sg_ieee - build generic sg for IEEE format
2848 * @ioc: per adapter object
2849 * @psge: virtual address for SGE
2850 * @data_out_dma: physical address for WRITES
2851 * @data_out_sz: data xfer size for WRITES
2852 * @data_in_dma: physical address for READS
2853 * @data_in_sz: data xfer size for READS
2856 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2857 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2862 if (!data_out_sz && !data_in_sz) {
2863 _base_build_zero_len_sge_ieee(ioc, psge);
2867 if (data_out_sz && data_in_sz) {
2868 /* WRITE sgel first */
2869 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2870 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2871 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2875 psge += ioc->sge_size_ieee;
2877 /* READ sgel last */
2878 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2879 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2881 } else if (data_out_sz) /* WRITE */ {
2882 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2883 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2884 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2885 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2887 } else if (data_in_sz) /* READ */ {
2888 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2889 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2890 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2891 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2896 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2899 * _base_config_dma_addressing - set dma addressing
2900 * @ioc: per adapter object
2901 * @pdev: PCI device struct
2903 * Return: 0 for success, non-zero for failure.
2906 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2910 if (ioc->is_mcpu_endpoint ||
2911 sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
2912 dma_get_required_mask(&pdev->dev) <= 32)
2914 /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2915 else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
2920 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
2921 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
2924 if (ioc->dma_mask > 32) {
2925 ioc->base_add_sg_single = &_base_add_sg_single_64;
2926 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2928 ioc->base_add_sg_single = &_base_add_sg_single_32;
2929 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2933 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2934 ioc->dma_mask, convert_to_kb(s.totalram));
2940 * _base_check_enable_msix - checks MSIX capabable.
2941 * @ioc: per adapter object
2943 * Check to see if card is capable of MSIX, and set number
2944 * of available msix vectors
2947 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2950 u16 message_control;
2952 /* Check whether controller SAS2008 B0 controller,
2953 * if it is SAS2008 B0 controller use IO-APIC instead of MSIX
2955 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2956 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2960 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2962 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2966 /* get msix vector count */
2967 /* NUMA_IO not supported for older controllers */
2968 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2969 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2970 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2971 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2972 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2973 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2974 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2975 ioc->msix_vector_count = 1;
2977 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2978 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2980 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2981 ioc->msix_vector_count));
2986 * _base_free_irq - free irq
2987 * @ioc: per adapter object
2989 * Freeing respective reply_queue from the list.
2992 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2994 struct adapter_reply_queue *reply_q, *next;
2996 if (list_empty(&ioc->reply_queue_list))
2999 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
3000 list_del(&reply_q->list);
3001 if (ioc->smp_affinity_enable)
3002 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
3003 reply_q->msix_index), NULL);
3004 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
3011 * _base_request_irq - request irq
3012 * @ioc: per adapter object
3013 * @index: msix index into vector table
3015 * Inserting respective reply_queue into the list.
3018 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
3020 struct pci_dev *pdev = ioc->pdev;
3021 struct adapter_reply_queue *reply_q;
3024 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
3026 ioc_err(ioc, "unable to allocate memory %zu!\n",
3027 sizeof(struct adapter_reply_queue));
3031 reply_q->msix_index = index;
3033 atomic_set(&reply_q->busy, 0);
3034 if (ioc->msix_enable)
3035 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
3036 ioc->driver_name, ioc->id, index);
3038 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
3039 ioc->driver_name, ioc->id);
3040 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
3041 IRQF_SHARED, reply_q->name, reply_q);
3043 pr_err("%s: unable to allocate interrupt %d!\n",
3044 reply_q->name, pci_irq_vector(pdev, index));
3049 INIT_LIST_HEAD(&reply_q->list);
3050 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
3055 * _base_assign_reply_queues - assigning msix index for each cpu
3056 * @ioc: per adapter object
3058 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
3060 * It would nice if we could call irq_set_affinity, however it is not
3061 * an exported symbol
3064 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
3066 unsigned int cpu, nr_cpus, nr_msix, index = 0;
3067 struct adapter_reply_queue *reply_q;
3068 int local_numa_node;
3070 if (!_base_is_controller_msix_enabled(ioc))
3073 if (ioc->msix_load_balance)
3076 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
3078 nr_cpus = num_online_cpus();
3079 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
3080 ioc->facts.MaxMSIxVectors);
3084 if (ioc->smp_affinity_enable) {
3087 * set irq affinity to local numa node for those irqs
3088 * corresponding to high iops queues.
3090 if (ioc->high_iops_queues) {
3091 local_numa_node = dev_to_node(&ioc->pdev->dev);
3092 for (index = 0; index < ioc->high_iops_queues;
3094 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
3095 index), cpumask_of_node(local_numa_node));
3099 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3100 const cpumask_t *mask;
3102 if (reply_q->msix_index < ioc->high_iops_queues)
3105 mask = pci_irq_get_affinity(ioc->pdev,
3106 reply_q->msix_index);
3108 ioc_warn(ioc, "no affinity for msi %x\n",
3109 reply_q->msix_index);
3113 for_each_cpu_and(cpu, mask, cpu_online_mask) {
3114 if (cpu >= ioc->cpu_msix_table_sz)
3116 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3123 cpu = cpumask_first(cpu_online_mask);
3124 nr_msix -= ioc->high_iops_queues;
3127 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3128 unsigned int i, group = nr_cpus / nr_msix;
3130 if (reply_q->msix_index < ioc->high_iops_queues)
3136 if (index < nr_cpus % nr_msix)
3139 for (i = 0 ; i < group ; i++) {
3140 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3141 cpu = cpumask_next(cpu, cpu_online_mask);
3148 * _base_check_and_enable_high_iops_queues - enable high iops mode
3149 * @ioc: per adapter object
3150 * @hba_msix_vector_count: msix vectors supported by HBA
3152 * Enable high iops queues only if
3153 * - HBA is a SEA/AERO controller and
3154 * - MSI-Xs vector supported by the HBA is 128 and
3155 * - total CPU count in the system >=16 and
3156 * - loaded driver with default max_msix_vectors module parameter and
3157 * - system booted in non kdump mode
3162 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3163 int hba_msix_vector_count)
3167 if (perf_mode == MPT_PERF_MODE_IOPS ||
3168 perf_mode == MPT_PERF_MODE_LATENCY) {
3169 ioc->high_iops_queues = 0;
3173 if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3175 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3176 speed = lnksta & PCI_EXP_LNKSTA_CLS;
3179 ioc->high_iops_queues = 0;
3184 if (!reset_devices && ioc->is_aero_ioc &&
3185 hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3186 num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3187 max_msix_vectors == -1)
3188 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3190 ioc->high_iops_queues = 0;
3194 * _base_disable_msix - disables msix
3195 * @ioc: per adapter object
3199 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3201 if (!ioc->msix_enable)
3203 pci_free_irq_vectors(ioc->pdev);
3204 ioc->msix_enable = 0;
3208 * _base_alloc_irq_vectors - allocate msix vectors
3209 * @ioc: per adapter object
3213 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3215 int i, irq_flags = PCI_IRQ_MSIX;
3216 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3217 struct irq_affinity *descp = &desc;
3219 if (ioc->smp_affinity_enable)
3220 irq_flags |= PCI_IRQ_AFFINITY;
3224 ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
3225 ioc->reply_queue_count);
3227 i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3228 ioc->high_iops_queues,
3229 ioc->reply_queue_count, irq_flags, descp);
3235 * _base_enable_msix - enables msix, failback to io_apic
3236 * @ioc: per adapter object
3240 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3243 int i, local_max_msix_vectors;
3246 ioc->msix_load_balance = false;
3248 if (msix_disable == -1 || msix_disable == 0)
3254 if (_base_check_enable_msix(ioc) != 0)
3257 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3258 pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3259 ioc->cpu_count, max_msix_vectors);
3260 if (ioc->is_aero_ioc)
3261 _base_check_and_enable_high_iops_queues(ioc,
3262 ioc->msix_vector_count);
3263 ioc->reply_queue_count =
3264 min_t(int, ioc->cpu_count + ioc->high_iops_queues,
3265 ioc->msix_vector_count);
3267 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3268 local_max_msix_vectors = (reset_devices) ? 1 : 8;
3270 local_max_msix_vectors = max_msix_vectors;
3272 if (local_max_msix_vectors > 0)
3273 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3274 ioc->reply_queue_count);
3275 else if (local_max_msix_vectors == 0)
3279 * Enable msix_load_balance only if combined reply queue mode is
3280 * disabled on SAS3 & above generation HBA devices.
3282 if (!ioc->combined_reply_queue &&
3283 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3285 "combined ReplyQueue is off, Enabling msix load balance\n");
3286 ioc->msix_load_balance = true;
3290 * smp affinity setting is not need when msix load balance
3293 if (ioc->msix_load_balance)
3294 ioc->smp_affinity_enable = 0;
3296 r = _base_alloc_irq_vectors(ioc);
3298 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3302 ioc->msix_enable = 1;
3303 ioc->reply_queue_count = r;
3304 for (i = 0; i < ioc->reply_queue_count; i++) {
3305 r = _base_request_irq(ioc, i);
3307 _base_free_irq(ioc);
3308 _base_disable_msix(ioc);
3313 ioc_info(ioc, "High IOPs queues : %s\n",
3314 ioc->high_iops_queues ? "enabled" : "disabled");
3318 /* failback to io_apic interrupt routing */
3320 ioc->high_iops_queues = 0;
3321 ioc_info(ioc, "High IOPs queues : disabled\n");
3322 ioc->reply_queue_count = 1;
3323 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3326 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3329 r = _base_request_irq(ioc, 0);
3335 * mpt3sas_base_unmap_resources - free controller resources
3336 * @ioc: per adapter object
3339 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3341 struct pci_dev *pdev = ioc->pdev;
3343 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3345 _base_free_irq(ioc);
3346 _base_disable_msix(ioc);
3348 kfree(ioc->replyPostRegisterIndex);
3349 ioc->replyPostRegisterIndex = NULL;
3352 if (ioc->chip_phys) {
3357 if (pci_is_enabled(pdev)) {
3358 pci_release_selected_regions(ioc->pdev, ioc->bars);
3359 pci_disable_pcie_error_reporting(pdev);
3360 pci_disable_device(pdev);
3365 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3368 * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
3369 * and if it is in fault state then issue diag reset.
3370 * @ioc: per adapter object
3372 * Return: 0 for success, non-zero for failure.
3375 mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3380 dinitprintk(ioc, pr_info("%s\n", __func__));
3381 if (ioc->pci_error_recovery)
3383 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3384 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3386 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3387 mpt3sas_print_fault_code(ioc, ioc_state &
3388 MPI2_DOORBELL_DATA_MASK);
3389 mpt3sas_base_mask_interrupts(ioc);
3390 rc = _base_diag_reset(ioc);
3391 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3392 MPI2_IOC_STATE_COREDUMP) {
3393 mpt3sas_print_coredump_info(ioc, ioc_state &
3394 MPI2_DOORBELL_DATA_MASK);
3395 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3396 mpt3sas_base_mask_interrupts(ioc);
3397 rc = _base_diag_reset(ioc);
3404 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3405 * @ioc: per adapter object
3407 * Return: 0 for success, non-zero for failure.
3410 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3412 struct pci_dev *pdev = ioc->pdev;
3417 phys_addr_t chip_phys = 0;
3418 struct adapter_reply_queue *reply_q;
3420 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3422 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3423 if (pci_enable_device_mem(pdev)) {
3424 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3430 if (pci_request_selected_regions(pdev, ioc->bars,
3431 ioc->driver_name)) {
3432 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3438 /* AER (Advanced Error Reporting) hooks */
3439 pci_enable_pcie_error_reporting(pdev);
3441 pci_set_master(pdev);
3444 if (_base_config_dma_addressing(ioc, pdev) != 0) {
3445 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3450 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3451 (!memap_sz || !pio_sz); i++) {
3452 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3455 pio_chip = (u64)pci_resource_start(pdev, i);
3456 pio_sz = pci_resource_len(pdev, i);
3457 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3460 ioc->chip_phys = pci_resource_start(pdev, i);
3461 chip_phys = ioc->chip_phys;
3462 memap_sz = pci_resource_len(pdev, i);
3463 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3467 if (ioc->chip == NULL) {
3469 "unable to map adapter memory! or resource not found\n");
3474 mpt3sas_base_mask_interrupts(ioc);
3476 r = _base_get_ioc_facts(ioc);
3478 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
3479 if (rc || (_base_get_ioc_facts(ioc)))
3483 if (!ioc->rdpq_array_enable_assigned) {
3484 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3485 ioc->rdpq_array_enable_assigned = 1;
3488 r = _base_enable_msix(ioc);
3492 if (!ioc->is_driver_loading)
3493 _base_init_irqpolls(ioc);
3494 /* Use the Combined reply queue feature only for SAS3 C0 & higher
3495 * revision HBAs and also only when reply queue count is greater than 8
3497 if (ioc->combined_reply_queue) {
3498 /* Determine the Supplemental Reply Post Host Index Registers
3499 * Addresse. Supplemental Reply Post Host Index Registers
3500 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
3501 * each register is at offset bytes of
3502 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
3504 ioc->replyPostRegisterIndex = kcalloc(
3505 ioc->combined_reply_index_count,
3506 sizeof(resource_size_t *), GFP_KERNEL);
3507 if (!ioc->replyPostRegisterIndex) {
3509 "allocation for replyPostRegisterIndex failed!\n");
3514 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3515 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3516 ((u8 __force *)&ioc->chip->Doorbell +
3517 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3518 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3522 if (ioc->is_warpdrive) {
3523 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3524 &ioc->chip->ReplyPostHostIndex;
3526 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3527 ioc->reply_post_host_index[i] =
3528 (resource_size_t __iomem *)
3529 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3533 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3534 pr_info("%s: %s enabled: IRQ %d\n",
3536 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3537 pci_irq_vector(ioc->pdev, reply_q->msix_index));
3539 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3540 &chip_phys, ioc->chip, memap_sz);
3541 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3542 (unsigned long long)pio_chip, pio_sz);
3544 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3545 pci_save_state(pdev);
3549 mpt3sas_base_unmap_resources(ioc);
3554 * mpt3sas_base_get_msg_frame - obtain request mf pointer
3555 * @ioc: per adapter object
3556 * @smid: system request message index(smid zero is invalid)
3558 * Return: virt pointer to message frame.
3561 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3563 return (void *)(ioc->request + (smid * ioc->request_sz));
3567 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3568 * @ioc: per adapter object
3569 * @smid: system request message index
3571 * Return: virt pointer to sense buffer.
3574 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3576 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3580 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3581 * @ioc: per adapter object
3582 * @smid: system request message index
3584 * Return: phys pointer to the low 32bit address of the sense buffer.
3587 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3589 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3590 SCSI_SENSE_BUFFERSIZE));
3594 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3595 * @ioc: per adapter object
3596 * @smid: system request message index
3598 * Return: virt pointer to a PCIe SGL.
3601 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3603 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3607 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3608 * @ioc: per adapter object
3609 * @smid: system request message index
3611 * Return: phys pointer to the address of the PCIe buffer.
3614 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3616 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3620 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3621 * @ioc: per adapter object
3622 * @phys_addr: lower 32 physical addr of the reply
3624 * Converts 32bit lower physical addr into a virt address.
3627 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3631 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3635 * _base_get_msix_index - get the msix index
3636 * @ioc: per adapter object
3637 * @scmd: scsi_cmnd object
3639 * Return: msix index of general reply queues,
3640 * i.e. reply queue on which IO request's reply
3641 * should be posted by the HBA firmware.
3644 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3645 struct scsi_cmnd *scmd)
3647 /* Enables reply_queue load balancing */
3648 if (ioc->msix_load_balance)
3649 return ioc->reply_queue_count ?
3650 base_mod64(atomic64_add_return(1,
3651 &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3653 if (scmd && ioc->shost->nr_hw_queues > 1) {
3654 u32 tag = blk_mq_unique_tag(scmd->request);
3656 return blk_mq_unique_tag_to_hwq(tag) +
3657 ioc->high_iops_queues;
3660 return ioc->cpu_msix_table[raw_smp_processor_id()];
3664 * _base_get_high_iops_msix_index - get the msix index of
3666 * @ioc: per adapter object
3667 * @scmd: scsi_cmnd object
3669 * Return: msix index of high iops reply queues.
3670 * i.e. high iops reply queue on which IO request's
3671 * reply should be posted by the HBA firmware.
3674 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3675 struct scsi_cmnd *scmd)
3678 * Round robin the IO interrupts among the high iops
3679 * reply queues in terms of batch count 16 when outstanding
3680 * IOs on the target device is >=8.
3683 if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3685 atomic64_add_return(1, &ioc->high_iops_outstanding) /
3686 MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3687 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3689 return _base_get_msix_index(ioc, scmd);
3693 * mpt3sas_base_get_smid - obtain a free smid from internal queue
3694 * @ioc: per adapter object
3695 * @cb_idx: callback index
3697 * Return: smid (zero is invalid)
3700 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3702 unsigned long flags;
3703 struct request_tracker *request;
3706 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3707 if (list_empty(&ioc->internal_free_list)) {
3708 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3709 ioc_err(ioc, "%s: smid not available\n", __func__);
3713 request = list_entry(ioc->internal_free_list.next,
3714 struct request_tracker, tracker_list);
3715 request->cb_idx = cb_idx;
3716 smid = request->smid;
3717 list_del(&request->tracker_list);
3718 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3723 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3724 * @ioc: per adapter object
3725 * @cb_idx: callback index
3726 * @scmd: pointer to scsi command object
3728 * Return: smid (zero is invalid)
3731 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3732 struct scsi_cmnd *scmd)
3734 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3736 u32 tag, unique_tag;
3738 unique_tag = blk_mq_unique_tag(scmd->request);
3739 tag = blk_mq_unique_tag_to_tag(unique_tag);
3742 * Store hw queue number corresponding to the tag.
3743 * This hw queue number is used later to determine
3744 * the unique_tag using the logic below. This unique_tag
3745 * is used to retrieve the scmd pointer corresponding
3746 * to tag using scsi_host_find_tag() API.
3749 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
3751 ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
3754 request->cb_idx = cb_idx;
3755 request->smid = smid;
3756 request->scmd = scmd;
3757 INIT_LIST_HEAD(&request->chain_list);
3762 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3763 * @ioc: per adapter object
3764 * @cb_idx: callback index
3766 * Return: smid (zero is invalid)
3769 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3771 unsigned long flags;
3772 struct request_tracker *request;
3775 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3776 if (list_empty(&ioc->hpr_free_list)) {
3777 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3781 request = list_entry(ioc->hpr_free_list.next,
3782 struct request_tracker, tracker_list);
3783 request->cb_idx = cb_idx;
3784 smid = request->smid;
3785 list_del(&request->tracker_list);
3786 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3791 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3794 * See _wait_for_commands_to_complete() call with regards to this code.
3796 if (ioc->shost_recovery && ioc->pending_io_count) {
3797 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3798 if (ioc->pending_io_count == 0)
3799 wake_up(&ioc->reset_wq);
3803 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3804 struct scsiio_tracker *st)
3806 if (WARN_ON(st->smid == 0))
3811 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3816 * mpt3sas_base_free_smid - put smid back on free_list
3817 * @ioc: per adapter object
3818 * @smid: system request message index
3821 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3823 unsigned long flags;
3826 if (smid < ioc->hi_priority_smid) {
3827 struct scsiio_tracker *st;
3830 st = _get_st_from_smid(ioc, smid);
3832 _base_recovery_check(ioc);
3836 /* Clear MPI request frame */
3837 request = mpt3sas_base_get_msg_frame(ioc, smid);
3838 memset(request, 0, ioc->request_sz);
3840 mpt3sas_base_clear_st(ioc, st);
3841 _base_recovery_check(ioc);
3842 ioc->io_queue_num[smid - 1] = 0;
3846 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3847 if (smid < ioc->internal_smid) {
3849 i = smid - ioc->hi_priority_smid;
3850 ioc->hpr_lookup[i].cb_idx = 0xFF;
3851 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3852 } else if (smid <= ioc->hba_queue_depth) {
3853 /* internal queue */
3854 i = smid - ioc->internal_smid;
3855 ioc->internal_lookup[i].cb_idx = 0xFF;
3856 list_add(&ioc->internal_lookup[i].tracker_list,
3857 &ioc->internal_free_list);
3859 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3863 * _base_mpi_ep_writeq - 32 bit write to MMIO
3865 * @addr: address in MMIO space
3866 * @writeq_lock: spin lock
3868 * This special handling for MPI EP to take care of 32 bit
3869 * environment where its not quarenteed to send the entire word
3873 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3874 spinlock_t *writeq_lock)
3876 unsigned long flags;
3878 spin_lock_irqsave(writeq_lock, flags);
3879 __raw_writel((u32)(b), addr);
3880 __raw_writel((u32)(b >> 32), (addr + 4));
3881 spin_unlock_irqrestore(writeq_lock, flags);
3885 * _base_writeq - 64 bit write to MMIO
3887 * @addr: address in MMIO space
3888 * @writeq_lock: spin lock
3890 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
3891 * care of 32 bit environment where its not quarenteed to send the entire word
3894 #if defined(writeq) && defined(CONFIG_64BIT)
3896 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3899 __raw_writeq(b, addr);
3904 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3906 _base_mpi_ep_writeq(b, addr, writeq_lock);
3911 * _base_set_and_get_msix_index - get the msix index and assign to msix_io
3912 * variable of scsi tracker
3913 * @ioc: per adapter object
3914 * @smid: system request message index
3916 * Return: msix index.
3919 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3921 struct scsiio_tracker *st = NULL;
3923 if (smid < ioc->hi_priority_smid)
3924 st = _get_st_from_smid(ioc, smid);
3927 return _base_get_msix_index(ioc, NULL);
3929 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3934 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3935 * @ioc: per adapter object
3936 * @smid: system request message index
3937 * @handle: device handle
3940 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
3941 u16 smid, u16 handle)
3943 Mpi2RequestDescriptorUnion_t descriptor;
3944 u64 *request = (u64 *)&descriptor;
3945 void *mpi_req_iomem;
3946 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3948 _clone_sg_entries(ioc, (void *) mfp, smid);
3949 mpi_req_iomem = (void __force *)ioc->chip +
3950 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3951 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3953 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3954 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3955 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3956 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3957 descriptor.SCSIIO.LMID = 0;
3958 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3959 &ioc->scsi_lookup_lock);
3963 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3964 * @ioc: per adapter object
3965 * @smid: system request message index
3966 * @handle: device handle
3969 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3971 Mpi2RequestDescriptorUnion_t descriptor;
3972 u64 *request = (u64 *)&descriptor;
3975 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3976 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3977 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3978 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3979 descriptor.SCSIIO.LMID = 0;
3980 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3981 &ioc->scsi_lookup_lock);
3985 * _base_put_smid_fast_path - send fast path request to firmware
3986 * @ioc: per adapter object
3987 * @smid: system request message index
3988 * @handle: device handle
3991 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3994 Mpi2RequestDescriptorUnion_t descriptor;
3995 u64 *request = (u64 *)&descriptor;
3997 descriptor.SCSIIO.RequestFlags =
3998 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3999 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4000 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4001 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4002 descriptor.SCSIIO.LMID = 0;
4003 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4004 &ioc->scsi_lookup_lock);
4008 * _base_put_smid_hi_priority - send Task Management request to firmware
4009 * @ioc: per adapter object
4010 * @smid: system request message index
4011 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
4014 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4017 Mpi2RequestDescriptorUnion_t descriptor;
4018 void *mpi_req_iomem;
4021 if (ioc->is_mcpu_endpoint) {
4022 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4024 /* TBD 256 is offset within sys register. */
4025 mpi_req_iomem = (void __force *)ioc->chip
4026 + MPI_FRAME_START_OFFSET
4027 + (smid * ioc->request_sz);
4028 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4032 request = (u64 *)&descriptor;
4034 descriptor.HighPriority.RequestFlags =
4035 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4036 descriptor.HighPriority.MSIxIndex = msix_task;
4037 descriptor.HighPriority.SMID = cpu_to_le16(smid);
4038 descriptor.HighPriority.LMID = 0;
4039 descriptor.HighPriority.Reserved1 = 0;
4040 if (ioc->is_mcpu_endpoint)
4041 _base_mpi_ep_writeq(*request,
4042 &ioc->chip->RequestDescriptorPostLow,
4043 &ioc->scsi_lookup_lock);
4045 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4046 &ioc->scsi_lookup_lock);
4050 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
4052 * @ioc: per adapter object
4053 * @smid: system request message index
4056 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4058 Mpi2RequestDescriptorUnion_t descriptor;
4059 u64 *request = (u64 *)&descriptor;
4061 descriptor.Default.RequestFlags =
4062 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
4063 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4064 descriptor.Default.SMID = cpu_to_le16(smid);
4065 descriptor.Default.LMID = 0;
4066 descriptor.Default.DescriptorTypeDependent = 0;
4067 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4068 &ioc->scsi_lookup_lock);
4072 * _base_put_smid_default - Default, primarily used for config pages
4073 * @ioc: per adapter object
4074 * @smid: system request message index
4077 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4079 Mpi2RequestDescriptorUnion_t descriptor;
4080 void *mpi_req_iomem;
4083 if (ioc->is_mcpu_endpoint) {
4084 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4086 _clone_sg_entries(ioc, (void *) mfp, smid);
4087 /* TBD 256 is offset within sys register */
4088 mpi_req_iomem = (void __force *)ioc->chip +
4089 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4090 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4093 request = (u64 *)&descriptor;
4094 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4095 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4096 descriptor.Default.SMID = cpu_to_le16(smid);
4097 descriptor.Default.LMID = 0;
4098 descriptor.Default.DescriptorTypeDependent = 0;
4099 if (ioc->is_mcpu_endpoint)
4100 _base_mpi_ep_writeq(*request,
4101 &ioc->chip->RequestDescriptorPostLow,
4102 &ioc->scsi_lookup_lock);
4104 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4105 &ioc->scsi_lookup_lock);
4109 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4110 * Atomic Request Descriptor
4111 * @ioc: per adapter object
4112 * @smid: system request message index
4113 * @handle: device handle, unused in this function, for function type match
4118 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4121 Mpi26AtomicRequestDescriptor_t descriptor;
4122 u32 *request = (u32 *)&descriptor;
4124 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4125 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4126 descriptor.SMID = cpu_to_le16(smid);
4128 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4132 * _base_put_smid_fast_path_atomic - send fast path request to firmware
4133 * using Atomic Request Descriptor
4134 * @ioc: per adapter object
4135 * @smid: system request message index
4136 * @handle: device handle, unused in this function, for function type match
4140 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4143 Mpi26AtomicRequestDescriptor_t descriptor;
4144 u32 *request = (u32 *)&descriptor;
4146 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4147 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4148 descriptor.SMID = cpu_to_le16(smid);
4150 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4154 * _base_put_smid_hi_priority_atomic - send Task Management request to
4155 * firmware using Atomic Request Descriptor
4156 * @ioc: per adapter object
4157 * @smid: system request message index
4158 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
4163 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4166 Mpi26AtomicRequestDescriptor_t descriptor;
4167 u32 *request = (u32 *)&descriptor;
4169 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4170 descriptor.MSIxIndex = msix_task;
4171 descriptor.SMID = cpu_to_le16(smid);
4173 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4177 * _base_put_smid_default_atomic - Default, primarily used for config pages
4178 * use Atomic Request Descriptor
4179 * @ioc: per adapter object
4180 * @smid: system request message index
4185 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4187 Mpi26AtomicRequestDescriptor_t descriptor;
4188 u32 *request = (u32 *)&descriptor;
4190 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4191 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4192 descriptor.SMID = cpu_to_le16(smid);
4194 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4198 * _base_display_OEMs_branding - Display branding string
4199 * @ioc: per adapter object
4202 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4204 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
4207 switch (ioc->pdev->subsystem_vendor) {
4208 case PCI_VENDOR_ID_INTEL:
4209 switch (ioc->pdev->device) {
4210 case MPI2_MFGPAGE_DEVID_SAS2008:
4211 switch (ioc->pdev->subsystem_device) {
4212 case MPT2SAS_INTEL_RMS2LL080_SSDID:
4213 ioc_info(ioc, "%s\n",
4214 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4216 case MPT2SAS_INTEL_RMS2LL040_SSDID:
4217 ioc_info(ioc, "%s\n",
4218 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4220 case MPT2SAS_INTEL_SSD910_SSDID:
4221 ioc_info(ioc, "%s\n",
4222 MPT2SAS_INTEL_SSD910_BRANDING);
4225 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4226 ioc->pdev->subsystem_device);
4230 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4231 switch (ioc->pdev->subsystem_device) {
4232 case MPT2SAS_INTEL_RS25GB008_SSDID:
4233 ioc_info(ioc, "%s\n",
4234 MPT2SAS_INTEL_RS25GB008_BRANDING);
4236 case MPT2SAS_INTEL_RMS25JB080_SSDID:
4237 ioc_info(ioc, "%s\n",
4238 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4240 case MPT2SAS_INTEL_RMS25JB040_SSDID:
4241 ioc_info(ioc, "%s\n",
4242 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4244 case MPT2SAS_INTEL_RMS25KB080_SSDID:
4245 ioc_info(ioc, "%s\n",
4246 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4248 case MPT2SAS_INTEL_RMS25KB040_SSDID:
4249 ioc_info(ioc, "%s\n",
4250 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4252 case MPT2SAS_INTEL_RMS25LB040_SSDID:
4253 ioc_info(ioc, "%s\n",
4254 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4256 case MPT2SAS_INTEL_RMS25LB080_SSDID:
4257 ioc_info(ioc, "%s\n",
4258 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4261 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4262 ioc->pdev->subsystem_device);
4266 case MPI25_MFGPAGE_DEVID_SAS3008:
4267 switch (ioc->pdev->subsystem_device) {
4268 case MPT3SAS_INTEL_RMS3JC080_SSDID:
4269 ioc_info(ioc, "%s\n",
4270 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4273 case MPT3SAS_INTEL_RS3GC008_SSDID:
4274 ioc_info(ioc, "%s\n",
4275 MPT3SAS_INTEL_RS3GC008_BRANDING);
4277 case MPT3SAS_INTEL_RS3FC044_SSDID:
4278 ioc_info(ioc, "%s\n",
4279 MPT3SAS_INTEL_RS3FC044_BRANDING);
4281 case MPT3SAS_INTEL_RS3UC080_SSDID:
4282 ioc_info(ioc, "%s\n",
4283 MPT3SAS_INTEL_RS3UC080_BRANDING);
4286 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4287 ioc->pdev->subsystem_device);
4292 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4293 ioc->pdev->subsystem_device);
4297 case PCI_VENDOR_ID_DELL:
4298 switch (ioc->pdev->device) {
4299 case MPI2_MFGPAGE_DEVID_SAS2008:
4300 switch (ioc->pdev->subsystem_device) {
4301 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4302 ioc_info(ioc, "%s\n",
4303 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4305 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4306 ioc_info(ioc, "%s\n",
4307 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4309 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4310 ioc_info(ioc, "%s\n",
4311 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4313 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4314 ioc_info(ioc, "%s\n",
4315 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4317 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4318 ioc_info(ioc, "%s\n",
4319 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4321 case MPT2SAS_DELL_PERC_H200_SSDID:
4322 ioc_info(ioc, "%s\n",
4323 MPT2SAS_DELL_PERC_H200_BRANDING);
4325 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4326 ioc_info(ioc, "%s\n",
4327 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4330 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4331 ioc->pdev->subsystem_device);
4335 case MPI25_MFGPAGE_DEVID_SAS3008:
4336 switch (ioc->pdev->subsystem_device) {
4337 case MPT3SAS_DELL_12G_HBA_SSDID:
4338 ioc_info(ioc, "%s\n",
4339 MPT3SAS_DELL_12G_HBA_BRANDING);
4342 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4343 ioc->pdev->subsystem_device);
4348 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4349 ioc->pdev->subsystem_device);
4353 case PCI_VENDOR_ID_CISCO:
4354 switch (ioc->pdev->device) {
4355 case MPI25_MFGPAGE_DEVID_SAS3008:
4356 switch (ioc->pdev->subsystem_device) {
4357 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4358 ioc_info(ioc, "%s\n",
4359 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4361 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4362 ioc_info(ioc, "%s\n",
4363 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4365 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4366 ioc_info(ioc, "%s\n",
4367 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4370 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4371 ioc->pdev->subsystem_device);
4375 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4376 switch (ioc->pdev->subsystem_device) {
4377 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4378 ioc_info(ioc, "%s\n",
4379 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4381 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4382 ioc_info(ioc, "%s\n",
4383 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4386 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4387 ioc->pdev->subsystem_device);
4392 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4393 ioc->pdev->subsystem_device);
4397 case MPT2SAS_HP_3PAR_SSVID:
4398 switch (ioc->pdev->device) {
4399 case MPI2_MFGPAGE_DEVID_SAS2004:
4400 switch (ioc->pdev->subsystem_device) {
4401 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4402 ioc_info(ioc, "%s\n",
4403 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4406 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4407 ioc->pdev->subsystem_device);
4411 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4412 switch (ioc->pdev->subsystem_device) {
4413 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4414 ioc_info(ioc, "%s\n",
4415 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4417 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4418 ioc_info(ioc, "%s\n",
4419 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4421 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4422 ioc_info(ioc, "%s\n",
4423 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4425 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4426 ioc_info(ioc, "%s\n",
4427 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4430 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4431 ioc->pdev->subsystem_device);
4436 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4437 ioc->pdev->subsystem_device);
4447 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4448 * version from FW Image Header.
4449 * @ioc: per adapter object
4451 * Return: 0 for success, non-zero for failure.
4454 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4456 Mpi2FWImageHeader_t *fw_img_hdr;
4457 Mpi26ComponentImageHeader_t *cmp_img_hdr;
4458 Mpi25FWUploadRequest_t *mpi_request;
4459 Mpi2FWUploadReply_t mpi_reply;
4460 int r = 0, issue_diag_reset = 0;
4461 u32 package_version = 0;
4462 void *fwpkg_data = NULL;
4463 dma_addr_t fwpkg_data_dma;
4464 u16 smid, ioc_status;
4467 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4469 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4470 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4474 data_length = sizeof(Mpi2FWImageHeader_t);
4475 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4476 &fwpkg_data_dma, GFP_KERNEL);
4479 "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4480 __FILE__, __LINE__, __func__);
4484 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4486 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4491 ioc->base_cmds.status = MPT3_CMD_PENDING;
4492 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4493 ioc->base_cmds.smid = smid;
4494 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4495 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4496 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4497 mpi_request->ImageSize = cpu_to_le32(data_length);
4498 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4500 init_completion(&ioc->base_cmds.done);
4501 ioc->put_smid_default(ioc, smid);
4502 /* Wait for 15 seconds */
4503 wait_for_completion_timeout(&ioc->base_cmds.done,
4504 FW_IMG_HDR_READ_TIMEOUT*HZ);
4505 ioc_info(ioc, "%s: complete\n", __func__);
4506 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4507 ioc_err(ioc, "%s: timeout\n", __func__);
4508 _debug_dump_mf(mpi_request,
4509 sizeof(Mpi25FWUploadRequest_t)/4);
4510 issue_diag_reset = 1;
4512 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4513 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4514 memcpy(&mpi_reply, ioc->base_cmds.reply,
4515 sizeof(Mpi2FWUploadReply_t));
4516 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4517 MPI2_IOCSTATUS_MASK;
4518 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4519 fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4520 if (le32_to_cpu(fw_img_hdr->Signature) ==
4521 MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4523 (Mpi26ComponentImageHeader_t *)
4527 cmp_img_hdr->ApplicationSpecific);
4531 fw_img_hdr->PackageVersion.Word);
4532 if (package_version)
4534 "FW Package Ver(%02d.%02d.%02d.%02d)\n",
4535 ((package_version) & 0xFF000000) >> 24,
4536 ((package_version) & 0x00FF0000) >> 16,
4537 ((package_version) & 0x0000FF00) >> 8,
4538 (package_version) & 0x000000FF);
4540 _debug_dump_mf(&mpi_reply,
4541 sizeof(Mpi2FWUploadReply_t)/4);
4545 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4548 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4550 if (issue_diag_reset) {
4551 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
4553 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
4561 * _base_display_ioc_capabilities - Display IOC's capabilities.
4562 * @ioc: per adapter object
4565 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4569 u32 iounit_pg1_flags;
4572 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4573 strncpy(desc, ioc->manu_pg0.ChipName, 16);
4574 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4576 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4577 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4578 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4579 ioc->facts.FWVersion.Word & 0x000000FF,
4580 ioc->pdev->revision,
4581 (bios_version & 0xFF000000) >> 24,
4582 (bios_version & 0x00FF0000) >> 16,
4583 (bios_version & 0x0000FF00) >> 8,
4584 bios_version & 0x000000FF);
4586 _base_display_OEMs_branding(ioc);
4588 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4589 pr_info("%sNVMe", i ? "," : "");
4593 ioc_info(ioc, "Protocol=(");
4595 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4596 pr_cont("Initiator");
4600 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4601 pr_cont("%sTarget", i ? "," : "");
4606 pr_cont("), Capabilities=(");
4608 if (!ioc->hide_ir_msg) {
4609 if (ioc->facts.IOCCapabilities &
4610 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4616 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4617 pr_cont("%sTLR", i ? "," : "");
4621 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4622 pr_cont("%sMulticast", i ? "," : "");
4626 if (ioc->facts.IOCCapabilities &
4627 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4628 pr_cont("%sBIDI Target", i ? "," : "");
4632 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4633 pr_cont("%sEEDP", i ? "," : "");
4637 if (ioc->facts.IOCCapabilities &
4638 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4639 pr_cont("%sSnapshot Buffer", i ? "," : "");
4643 if (ioc->facts.IOCCapabilities &
4644 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4645 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4649 if (ioc->facts.IOCCapabilities &
4650 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4651 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4655 if (ioc->facts.IOCCapabilities &
4656 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4657 pr_cont("%sTask Set Full", i ? "," : "");
4661 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4662 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4663 pr_cont("%sNCQ", i ? "," : "");
4671 * mpt3sas_base_update_missing_delay - change the missing delay timers
4672 * @ioc: per adapter object
4673 * @device_missing_delay: amount of time till device is reported missing
4674 * @io_missing_delay: interval IO is returned when there is a missing device
4676 * Passed on the command line, this function will modify the device missing
4677 * delay, as well as the io missing delay. This should be called at driver
4681 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4682 u16 device_missing_delay, u8 io_missing_delay)
4684 u16 dmd, dmd_new, dmd_orignal;
4685 u8 io_missing_delay_original;
4687 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4688 Mpi2ConfigReply_t mpi_reply;
4692 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4696 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4697 sizeof(Mpi2SasIOUnit1PhyData_t));
4698 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4699 if (!sas_iounit_pg1) {
4700 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4701 __FILE__, __LINE__, __func__);
4704 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4705 sas_iounit_pg1, sz))) {
4706 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4707 __FILE__, __LINE__, __func__);
4710 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4711 MPI2_IOCSTATUS_MASK;
4712 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4713 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4714 __FILE__, __LINE__, __func__);
4718 /* device missing delay */
4719 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4720 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4721 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4723 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4725 if (device_missing_delay > 0x7F) {
4726 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4727 device_missing_delay;
4729 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4731 dmd = device_missing_delay;
4732 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4734 /* io missing delay */
4735 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4736 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4738 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4740 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4742 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4745 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4746 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4747 dmd_orignal, dmd_new);
4748 ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4749 io_missing_delay_original,
4751 ioc->device_missing_delay = dmd_new;
4752 ioc->io_missing_delay = io_missing_delay;
4756 kfree(sas_iounit_pg1);
4760 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4761 * according to performance mode.
4762 * @ioc : per adapter object
4764 * Return: zero on success; otherwise return EAGAIN error code asking the
4768 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4770 Mpi2IOCPage1_t ioc_pg1;
4771 Mpi2ConfigReply_t mpi_reply;
4774 rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4777 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4779 switch (perf_mode) {
4780 case MPT_PERF_MODE_DEFAULT:
4781 case MPT_PERF_MODE_BALANCED:
4782 if (ioc->high_iops_queues) {
4784 "Enable interrupt coalescing only for first\t"
4785 "%d reply queues\n",
4786 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4788 * If 31st bit is zero then interrupt coalescing is
4789 * enabled for all reply descriptor post queues.
4790 * If 31st bit is set to one then user can
4791 * enable/disable interrupt coalescing on per reply
4792 * descriptor post queue group(8) basis. So to enable
4793 * interrupt coalescing only on first reply descriptor
4794 * post queue group 31st bit and zero th bit is enabled.
4796 ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4797 ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4798 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4801 ioc_info(ioc, "performance mode: balanced\n");
4805 case MPT_PERF_MODE_LATENCY:
4807 * Enable interrupt coalescing on all reply queues
4808 * with timeout value 0xA
4810 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4811 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4812 ioc_pg1.ProductSpecific = 0;
4813 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4816 ioc_info(ioc, "performance mode: latency\n");
4818 case MPT_PERF_MODE_IOPS:
4820 * Enable interrupt coalescing on all reply queues.
4823 "performance mode: iops with coalescing timeout: 0x%x\n",
4824 le32_to_cpu(ioc_pg1.CoalescingTimeout));
4825 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4826 ioc_pg1.ProductSpecific = 0;
4827 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4836 * _base_get_event_diag_triggers - get event diag trigger values from
4838 * @ioc : per adapter object
4843 _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4845 Mpi26DriverTriggerPage2_t trigger_pg2;
4846 struct SL_WH_EVENT_TRIGGER_T *event_tg;
4847 MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
4848 Mpi2ConfigReply_t mpi_reply;
4853 r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
4858 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4859 MPI2_IOCSTATUS_MASK;
4860 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4863 "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
4864 __func__, ioc_status));
4868 if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
4869 count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
4870 count = min_t(u16, NUM_VALID_ENTRIES, count);
4871 ioc->diag_trigger_event.ValidEntries = count;
4873 event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
4874 mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
4875 for (i = 0; i < count; i++) {
4876 event_tg->EventValue = le16_to_cpu(
4877 mpi_event_tg->MPIEventCode);
4878 event_tg->LogEntryQualifier = le16_to_cpu(
4879 mpi_event_tg->MPIEventCodeSpecific);
4888 * _base_get_scsi_diag_triggers - get scsi diag trigger values from
4890 * @ioc : per adapter object
4892 * Return: 0 on success; otherwise return failure status.
4895 _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4897 Mpi26DriverTriggerPage3_t trigger_pg3;
4898 struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
4899 MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
4900 Mpi2ConfigReply_t mpi_reply;
4905 r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
4910 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4911 MPI2_IOCSTATUS_MASK;
4912 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4915 "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
4916 __func__, ioc_status));
4920 if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
4921 count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
4922 count = min_t(u16, NUM_VALID_ENTRIES, count);
4923 ioc->diag_trigger_scsi.ValidEntries = count;
4925 scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
4926 mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
4927 for (i = 0; i < count; i++) {
4928 scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
4929 scsi_tg->ASC = mpi_scsi_tg->ASC;
4930 scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
4940 * _base_get_mpi_diag_triggers - get mpi diag trigger values from
4942 * @ioc : per adapter object
4944 * Return: 0 on success; otherwise return failure status.
4947 _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4949 Mpi26DriverTriggerPage4_t trigger_pg4;
4950 struct SL_WH_MPI_TRIGGER_T *status_tg;
4951 MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
4952 Mpi2ConfigReply_t mpi_reply;
4957 r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
4962 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4963 MPI2_IOCSTATUS_MASK;
4964 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4967 "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
4968 __func__, ioc_status));
4972 if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
4973 count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
4974 count = min_t(u16, NUM_VALID_ENTRIES, count);
4975 ioc->diag_trigger_mpi.ValidEntries = count;
4977 status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
4978 mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
4980 for (i = 0; i < count; i++) {
4981 status_tg->IOCStatus = le16_to_cpu(
4982 mpi_status_tg->IOCStatus);
4983 status_tg->IocLogInfo = le32_to_cpu(
4984 mpi_status_tg->LogInfo);
4994 * _base_get_master_diag_triggers - get master diag trigger values from
4996 * @ioc : per adapter object
5001 _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5003 Mpi26DriverTriggerPage1_t trigger_pg1;
5004 Mpi2ConfigReply_t mpi_reply;
5008 r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
5013 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5014 MPI2_IOCSTATUS_MASK;
5015 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5018 "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
5019 __func__, ioc_status));
5023 if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
5024 ioc->diag_trigger_master.MasterData |=
5026 trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
5031 * _base_check_for_trigger_pages_support - checks whether HBA FW supports
5032 * driver trigger pages or not
5033 * @ioc : per adapter object
5034 * @trigger_flags : address where trigger page0's TriggerFlags value is copied
5036 * Return: trigger flags mask if HBA FW supports driver trigger pages;
5037 * otherwise returns %-EFAULT if driver trigger pages are not supported by FW or
5038 * return EAGAIN if diag reset occurred due to FW fault and asking the
5039 * caller to retry the command.
5043 _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
5045 Mpi26DriverTriggerPage0_t trigger_pg0;
5047 Mpi2ConfigReply_t mpi_reply;
5050 r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
5055 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5056 MPI2_IOCSTATUS_MASK;
5057 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5060 *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
5065 * _base_get_diag_triggers - Retrieve diag trigger values from
5067 * @ioc : per adapter object
5069 * Return: zero on success; otherwise return EAGAIN error codes
5070 * asking the caller to retry.
5073 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5079 * Default setting of master trigger.
5081 ioc->diag_trigger_master.MasterData =
5082 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
5084 r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
5089 * Don't go for error handling when FW doesn't support
5090 * driver trigger pages.
5095 ioc->supports_trigger_pages = 1;
5098 * Retrieve master diag trigger values from driver trigger pg1
5099 * if master trigger bit enabled in TriggerFlags.
5101 if ((u16)trigger_flags &
5102 MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
5103 r = _base_get_master_diag_triggers(ioc);
5109 * Retrieve event diag trigger values from driver trigger pg2
5110 * if event trigger bit enabled in TriggerFlags.
5112 if ((u16)trigger_flags &
5113 MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
5114 r = _base_get_event_diag_triggers(ioc);
5120 * Retrieve scsi diag trigger values from driver trigger pg3
5121 * if scsi trigger bit enabled in TriggerFlags.
5123 if ((u16)trigger_flags &
5124 MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
5125 r = _base_get_scsi_diag_triggers(ioc);
5130 * Retrieve mpi error diag trigger values from driver trigger pg4
5131 * if loginfo trigger bit enabled in TriggerFlags.
5133 if ((u16)trigger_flags &
5134 MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
5135 r = _base_get_mpi_diag_triggers(ioc);
5143 * _base_update_diag_trigger_pages - Update the driver trigger pages after
5144 * online FW update, in case updated FW supports driver
5146 * @ioc : per adapter object
5151 _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
5154 if (ioc->diag_trigger_master.MasterData)
5155 mpt3sas_config_update_driver_trigger_pg1(ioc,
5156 &ioc->diag_trigger_master, 1);
5158 if (ioc->diag_trigger_event.ValidEntries)
5159 mpt3sas_config_update_driver_trigger_pg2(ioc,
5160 &ioc->diag_trigger_event, 1);
5162 if (ioc->diag_trigger_scsi.ValidEntries)
5163 mpt3sas_config_update_driver_trigger_pg3(ioc,
5164 &ioc->diag_trigger_scsi, 1);
5166 if (ioc->diag_trigger_mpi.ValidEntries)
5167 mpt3sas_config_update_driver_trigger_pg4(ioc,
5168 &ioc->diag_trigger_mpi, 1);
5172 * _base_static_config_pages - static start of day config pages
5173 * @ioc: per adapter object
5176 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
5178 Mpi2ConfigReply_t mpi_reply;
5179 u32 iounit_pg1_flags;
5182 ioc->nvme_abort_timeout = 30;
5184 rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
5188 if (ioc->ir_firmware) {
5189 rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
5195 * Ensure correct T10 PI operation if vendor left EEDPTagMode
5196 * flag unset in NVDATA.
5198 rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
5202 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
5203 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
5205 ioc->manu_pg11.EEDPTagMode &= ~0x3;
5206 ioc->manu_pg11.EEDPTagMode |= 0x1;
5207 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
5210 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
5211 ioc->tm_custom_handling = 1;
5213 ioc->tm_custom_handling = 0;
5214 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
5215 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
5216 else if (ioc->manu_pg11.NVMeAbortTO >
5217 NVME_TASK_ABORT_MAX_TIMEOUT)
5218 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
5220 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
5222 ioc->time_sync_interval =
5223 ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
5224 if (ioc->time_sync_interval) {
5225 if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
5226 ioc->time_sync_interval =
5227 ioc->time_sync_interval * SECONDS_PER_HOUR;
5229 ioc->time_sync_interval =
5230 ioc->time_sync_interval * SECONDS_PER_MIN;
5231 dinitprintk(ioc, ioc_info(ioc,
5232 "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
5233 ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
5234 MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
5236 if (ioc->is_gen35_ioc)
5238 "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
5240 rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
5243 rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
5246 rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
5249 rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
5252 rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5255 rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
5258 _base_display_ioc_capabilities(ioc);
5261 * Enable task_set_full handling in iounit_pg1 when the
5262 * facts capabilities indicate that its supported.
5264 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
5265 if ((ioc->facts.IOCCapabilities &
5266 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
5268 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
5271 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
5272 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
5273 rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5277 if (ioc->iounit_pg8.NumSensors)
5278 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
5279 if (ioc->is_aero_ioc) {
5280 rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
5284 if (ioc->is_gen35_ioc) {
5285 if (ioc->is_driver_loading) {
5286 rc = _base_get_diag_triggers(ioc);
5291 * In case of online HBA FW update operation,
5292 * check whether updated FW supports the driver trigger
5294 * - If previous FW has not supported driver trigger
5295 * pages and newer FW supports them then update these
5296 * pages with current diag trigger values.
5297 * - If previous FW has supported driver trigger pages
5298 * and new FW doesn't support them then disable
5299 * support_trigger_pages flag.
5301 _base_check_for_trigger_pages_support(ioc, &tg_flags);
5302 if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
5303 _base_update_diag_trigger_pages(ioc);
5304 else if (ioc->supports_trigger_pages &&
5305 tg_flags == -EFAULT)
5306 ioc->supports_trigger_pages = 0;
5313 * mpt3sas_free_enclosure_list - release memory
5314 * @ioc: per adapter object
5316 * Free memory allocated during enclosure add.
5319 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
5321 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
5323 /* Free enclosure list */
5324 list_for_each_entry_safe(enclosure_dev,
5325 enclosure_dev_next, &ioc->enclosure_list, list) {
5326 list_del(&enclosure_dev->list);
5327 kfree(enclosure_dev);
5332 * _base_release_memory_pools - release memory
5333 * @ioc: per adapter object
5335 * Free memory allocated from _base_allocate_memory_pools.
5338 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5342 int dma_alloc_count = 0;
5343 struct chain_tracker *ct;
5344 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
5346 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5349 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
5350 ioc->request, ioc->request_dma);
5352 ioc_info(ioc, "request_pool(0x%p): free\n",
5354 ioc->request = NULL;
5358 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5359 dma_pool_destroy(ioc->sense_dma_pool);
5361 ioc_info(ioc, "sense_pool(0x%p): free\n",
5367 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
5368 dma_pool_destroy(ioc->reply_dma_pool);
5370 ioc_info(ioc, "reply_pool(0x%p): free\n",
5375 if (ioc->reply_free) {
5376 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
5377 ioc->reply_free_dma);
5378 dma_pool_destroy(ioc->reply_free_dma_pool);
5380 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
5382 ioc->reply_free = NULL;
5385 if (ioc->reply_post) {
5386 dma_alloc_count = DIV_ROUND_UP(count,
5387 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
5388 for (i = 0; i < count; i++) {
5389 if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
5390 && dma_alloc_count) {
5391 if (ioc->reply_post[i].reply_post_free) {
5393 ioc->reply_post_free_dma_pool,
5394 ioc->reply_post[i].reply_post_free,
5395 ioc->reply_post[i].reply_post_free_dma);
5396 dexitprintk(ioc, ioc_info(ioc,
5397 "reply_post_free_pool(0x%p): free\n",
5398 ioc->reply_post[i].reply_post_free));
5399 ioc->reply_post[i].reply_post_free =
5405 dma_pool_destroy(ioc->reply_post_free_dma_pool);
5406 if (ioc->reply_post_free_array &&
5407 ioc->rdpq_array_enable) {
5408 dma_pool_free(ioc->reply_post_free_array_dma_pool,
5409 ioc->reply_post_free_array,
5410 ioc->reply_post_free_array_dma);
5411 ioc->reply_post_free_array = NULL;
5413 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
5414 kfree(ioc->reply_post);
5417 if (ioc->pcie_sgl_dma_pool) {
5418 for (i = 0; i < ioc->scsiio_depth; i++) {
5419 dma_pool_free(ioc->pcie_sgl_dma_pool,
5420 ioc->pcie_sg_lookup[i].pcie_sgl,
5421 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5422 ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
5424 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
5426 if (ioc->config_page) {
5428 ioc_info(ioc, "config_page(0x%p): free\n",
5430 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
5431 ioc->config_page, ioc->config_page_dma);
5434 kfree(ioc->hpr_lookup);
5435 ioc->hpr_lookup = NULL;
5436 kfree(ioc->internal_lookup);
5437 ioc->internal_lookup = NULL;
5438 if (ioc->chain_lookup) {
5439 for (i = 0; i < ioc->scsiio_depth; i++) {
5440 for (j = ioc->chains_per_prp_buffer;
5441 j < ioc->chains_needed_per_io; j++) {
5442 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5443 if (ct && ct->chain_buffer)
5444 dma_pool_free(ioc->chain_dma_pool,
5446 ct->chain_buffer_dma);
5448 kfree(ioc->chain_lookup[i].chains_per_smid);
5450 dma_pool_destroy(ioc->chain_dma_pool);
5451 kfree(ioc->chain_lookup);
5452 ioc->chain_lookup = NULL;
5455 kfree(ioc->io_queue_num);
5456 ioc->io_queue_num = NULL;
5460 * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
5461 * having same upper 32bits in their base memory address.
5462 * @reply_pool_start_address: Base address of a reply queue set
5463 * @pool_sz: Size of single Reply Descriptor Post Queues pool size
5465 * Return: 1 if reply queues in a set have a same upper 32bits in their base
5466 * memory address, else 0.
5470 mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
5472 long reply_pool_end_address;
5474 reply_pool_end_address = reply_pool_start_address + pool_sz;
5476 if (upper_32_bits(reply_pool_start_address) ==
5477 upper_32_bits(reply_pool_end_address))
5484 * _base_reduce_hba_queue_depth- Retry with reduced queue depth
5485 * @ioc: Adapter object
5487 * Return: 0 for success, non-zero for failure.
5490 _base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
5494 if ((ioc->hba_queue_depth - reduce_sz) >
5495 (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
5496 ioc->hba_queue_depth -= reduce_sz;
5503 * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
5504 * for pcie sgl pools.
5505 * @ioc: Adapter object
5506 * @sz: DMA Pool size
5508 * Return: 0 for success, non-zero for failure.
5512 _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5515 struct chain_tracker *ct;
5517 ioc->pcie_sgl_dma_pool =
5518 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
5520 if (!ioc->pcie_sgl_dma_pool) {
5521 ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5525 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5526 ioc->chains_per_prp_buffer =
5527 min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
5528 for (i = 0; i < ioc->scsiio_depth; i++) {
5529 ioc->pcie_sg_lookup[i].pcie_sgl =
5530 dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5531 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5532 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5533 ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5537 if (!mpt3sas_check_same_4gb_region(
5538 (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
5539 ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
5540 ioc->pcie_sg_lookup[i].pcie_sgl,
5541 (unsigned long long)
5542 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5543 ioc->use_32bit_dma = true;
5547 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5548 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5550 ioc->pcie_sg_lookup[i].pcie_sgl +
5551 (j * ioc->chain_segment_sz);
5552 ct->chain_buffer_dma =
5553 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5554 (j * ioc->chain_segment_sz);
5557 dinitprintk(ioc, ioc_info(ioc,
5558 "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5559 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
5560 dinitprintk(ioc, ioc_info(ioc,
5561 "Number of chains can fit in a PRP page(%d)\n",
5562 ioc->chains_per_prp_buffer));
5567 * _base_allocate_chain_dma_pool - Allocating DMA'able memory
5568 * for chain dma pool.
5569 * @ioc: Adapter object
5570 * @sz: DMA Pool size
5572 * Return: 0 for success, non-zero for failure.
5575 _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5578 struct chain_tracker *ctr;
5580 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5581 ioc->chain_segment_sz, 16, 0);
5582 if (!ioc->chain_dma_pool)
5585 for (i = 0; i < ioc->scsiio_depth; i++) {
5586 for (j = ioc->chains_per_prp_buffer;
5587 j < ioc->chains_needed_per_io; j++) {
5588 ctr = &ioc->chain_lookup[i].chains_per_smid[j];
5589 ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
5590 GFP_KERNEL, &ctr->chain_buffer_dma);
5591 if (!ctr->chain_buffer)
5593 if (!mpt3sas_check_same_4gb_region((long)
5594 ctr->chain_buffer, ioc->chain_segment_sz)) {
5596 "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
5598 (unsigned long long)ctr->chain_buffer_dma);
5599 ioc->use_32bit_dma = true;
5604 dinitprintk(ioc, ioc_info(ioc,
5605 "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
5606 ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
5607 (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
5608 ioc->chain_segment_sz))/1024));
5613 * _base_allocate_sense_dma_pool - Allocating DMA'able memory
5614 * for sense dma pool.
5615 * @ioc: Adapter object
5616 * @sz: DMA Pool size
5617 * Return: 0 for success, non-zero for failure.
5620 _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5622 ioc->sense_dma_pool =
5623 dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
5624 if (!ioc->sense_dma_pool)
5626 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
5627 GFP_KERNEL, &ioc->sense_dma);
5630 if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
5631 dinitprintk(ioc, pr_err(
5632 "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
5633 ioc->sense, (unsigned long long) ioc->sense_dma));
5634 ioc->use_32bit_dma = true;
5638 "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
5639 ioc->sense, (unsigned long long)ioc->sense_dma,
5640 ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
5645 * _base_allocate_reply_pool - Allocating DMA'able memory
5647 * @ioc: Adapter object
5648 * @sz: DMA Pool size
5649 * Return: 0 for success, non-zero for failure.
5652 _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5654 /* reply pool, 4 byte align */
5655 ioc->reply_dma_pool = dma_pool_create("reply pool",
5656 &ioc->pdev->dev, sz, 4, 0);
5657 if (!ioc->reply_dma_pool)
5659 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5663 if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
5664 dinitprintk(ioc, pr_err(
5665 "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
5666 ioc->reply, (unsigned long long) ioc->reply_dma));
5667 ioc->use_32bit_dma = true;
5670 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5671 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5673 "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5674 ioc->reply, (unsigned long long)ioc->reply_dma,
5675 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
5680 * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
5681 * for reply free dma pool.
5682 * @ioc: Adapter object
5683 * @sz: DMA Pool size
5684 * Return: 0 for success, non-zero for failure.
5687 _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5689 /* reply free queue, 16 byte align */
5690 ioc->reply_free_dma_pool = dma_pool_create(
5691 "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
5692 if (!ioc->reply_free_dma_pool)
5694 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
5695 GFP_KERNEL, &ioc->reply_free_dma);
5696 if (!ioc->reply_free)
5698 if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
5700 pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
5701 ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
5702 ioc->use_32bit_dma = true;
5705 memset(ioc->reply_free, 0, sz);
5706 dinitprintk(ioc, ioc_info(ioc,
5707 "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5708 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
5709 dinitprintk(ioc, ioc_info(ioc,
5710 "reply_free_dma (0x%llx)\n",
5711 (unsigned long long)ioc->reply_free_dma));
5716 * _base_allocate_reply_post_free_array - Allocating DMA'able memory
5717 * for reply post free array.
5718 * @ioc: Adapter object
5719 * @reply_post_free_array_sz: DMA Pool size
5720 * Return: 0 for success, non-zero for failure.
5724 _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
5725 u32 reply_post_free_array_sz)
5727 ioc->reply_post_free_array_dma_pool =
5728 dma_pool_create("reply_post_free_array pool",
5729 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5730 if (!ioc->reply_post_free_array_dma_pool)
5732 ioc->reply_post_free_array =
5733 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5734 GFP_KERNEL, &ioc->reply_post_free_array_dma);
5735 if (!ioc->reply_post_free_array)
5737 if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
5738 reply_post_free_array_sz)) {
5739 dinitprintk(ioc, pr_err(
5740 "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
5742 (unsigned long long) ioc->reply_free_dma));
5743 ioc->use_32bit_dma = true;
5749 * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
5751 * @ioc: per adapter object
5752 * @sz: DMA Pool size
5753 * Return: 0 for success, non-zero for failure.
5756 base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
5759 u32 dma_alloc_count = 0;
5760 int reply_post_free_sz = ioc->reply_post_queue_depth *
5761 sizeof(Mpi2DefaultReplyDescriptor_t);
5762 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
5764 ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
5766 if (!ioc->reply_post)
5769 * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and
5770 * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should
5771 * be within 4GB boundary i.e reply queues in a set must have same
5772 * upper 32-bits in their memory address. so here driver is allocating
5773 * the DMA'able memory for reply queues according.
5774 * Driver uses limitation of
5775 * VENTURA_SERIES to manage INVADER_SERIES as well.
5777 dma_alloc_count = DIV_ROUND_UP(count,
5778 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
5779 ioc->reply_post_free_dma_pool =
5780 dma_pool_create("reply_post_free pool",
5781 &ioc->pdev->dev, sz, 16, 0);
5782 if (!ioc->reply_post_free_dma_pool)
5784 for (i = 0; i < count; i++) {
5785 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
5786 ioc->reply_post[i].reply_post_free =
5787 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
5789 &ioc->reply_post[i].reply_post_free_dma);
5790 if (!ioc->reply_post[i].reply_post_free)
5793 * Each set of RDPQ pool must satisfy 4gb boundary
5795 * 1) Check if allocated resources for RDPQ pool are in
5796 * the same 4GB range.
5797 * 2) If #1 is true, continue with 64 bit DMA.
5798 * 3) If #1 is false, return 1. which means free all the
5799 * resources and set DMA mask to 32 and allocate.
5801 if (!mpt3sas_check_same_4gb_region(
5802 (long)ioc->reply_post[i].reply_post_free, sz)) {
5804 ioc_err(ioc, "bad Replypost free pool(0x%p)"
5805 "reply_post_free_dma = (0x%llx)\n",
5806 ioc->reply_post[i].reply_post_free,
5807 (unsigned long long)
5808 ioc->reply_post[i].reply_post_free_dma));
5814 ioc->reply_post[i].reply_post_free =
5815 (Mpi2ReplyDescriptorsUnion_t *)
5816 ((long)ioc->reply_post[i-1].reply_post_free
5817 + reply_post_free_sz);
5818 ioc->reply_post[i].reply_post_free_dma =
5820 (ioc->reply_post[i-1].reply_post_free_dma +
5821 reply_post_free_sz);
5828 * _base_allocate_memory_pools - allocate start of day memory pools
5829 * @ioc: per adapter object
5831 * Return: 0 success, anything else error.
5834 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5836 struct mpt3sas_facts *facts;
5837 u16 max_sge_elements;
5838 u16 chains_needed_per_io;
5839 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
5841 u32 rdpq_sz = 0, sense_sz = 0;
5842 u16 max_request_credit, nvme_blocks_needed;
5843 unsigned short sg_tablesize;
5846 int ret = 0, rc = 0;
5848 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5852 facts = &ioc->facts;
5854 /* command line tunables for max sgl entries */
5855 if (max_sgl_entries != -1)
5856 sg_tablesize = max_sgl_entries;
5858 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
5859 sg_tablesize = MPT2SAS_SG_DEPTH;
5861 sg_tablesize = MPT3SAS_SG_DEPTH;
5864 /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
5866 sg_tablesize = min_t(unsigned short, sg_tablesize,
5867 MPT_KDUMP_MIN_PHYS_SEGMENTS);
5869 if (ioc->is_mcpu_endpoint)
5870 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5872 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
5873 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5874 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
5875 sg_tablesize = min_t(unsigned short, sg_tablesize,
5877 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
5878 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
5880 ioc->shost->sg_tablesize = sg_tablesize;
5883 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
5884 (facts->RequestCredit / 4));
5885 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
5886 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
5887 INTERNAL_SCSIIO_CMDS_COUNT)) {
5888 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
5889 facts->RequestCredit);
5892 ioc->internal_depth = 10;
5895 ioc->hi_priority_depth = ioc->internal_depth - (5);
5896 /* command line tunables for max controller queue depth */
5897 if (max_queue_depth != -1 && max_queue_depth != 0) {
5898 max_request_credit = min_t(u16, max_queue_depth +
5899 ioc->internal_depth, facts->RequestCredit);
5900 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
5901 max_request_credit = MAX_HBA_QUEUE_DEPTH;
5902 } else if (reset_devices)
5903 max_request_credit = min_t(u16, facts->RequestCredit,
5904 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
5906 max_request_credit = min_t(u16, facts->RequestCredit,
5907 MAX_HBA_QUEUE_DEPTH);
5909 /* Firmware maintains additional facts->HighPriorityCredit number of
5910 * credits for HiPriprity Request messages, so hba queue depth will be
5911 * sum of max_request_credit and high priority queue depth.
5913 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
5915 /* request frame size */
5916 ioc->request_sz = facts->IOCRequestFrameSize * 4;
5918 /* reply frame size */
5919 ioc->reply_sz = facts->ReplyFrameSize * 4;
5921 /* chain segment size */
5922 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5923 if (facts->IOCMaxChainSegmentSize)
5924 ioc->chain_segment_sz =
5925 facts->IOCMaxChainSegmentSize *
5928 /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
5929 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
5932 ioc->chain_segment_sz = ioc->request_sz;
5934 /* calculate the max scatter element size */
5935 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
5939 /* calculate number of sg elements left over in the 1st frame */
5940 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
5941 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
5942 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
5944 /* now do the same for a chain buffer */
5945 max_sge_elements = ioc->chain_segment_sz - sge_size;
5946 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
5949 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
5951 chains_needed_per_io = ((ioc->shost->sg_tablesize -
5952 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
5954 if (chains_needed_per_io > facts->MaxChainDepth) {
5955 chains_needed_per_io = facts->MaxChainDepth;
5956 ioc->shost->sg_tablesize = min_t(u16,
5957 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
5958 * chains_needed_per_io), ioc->shost->sg_tablesize);
5960 ioc->chains_needed_per_io = chains_needed_per_io;
5962 /* reply free queue sizing - taking into account for 64 FW events */
5963 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5965 /* mCPU manage single counters for simplicity */
5966 if (ioc->is_mcpu_endpoint)
5967 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
5969 /* calculate reply descriptor post queue depth */
5970 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
5971 ioc->reply_free_queue_depth + 1;
5972 /* align the reply post queue on the next 16 count boundary */
5973 if (ioc->reply_post_queue_depth % 16)
5974 ioc->reply_post_queue_depth += 16 -
5975 (ioc->reply_post_queue_depth % 16);
5978 if (ioc->reply_post_queue_depth >
5979 facts->MaxReplyDescriptorPostQueueDepth) {
5980 ioc->reply_post_queue_depth =
5981 facts->MaxReplyDescriptorPostQueueDepth -
5982 (facts->MaxReplyDescriptorPostQueueDepth % 16);
5983 ioc->hba_queue_depth =
5984 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
5985 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5989 "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
5990 "sge_per_io(%d), chains_per_io(%d)\n",
5991 ioc->max_sges_in_main_message,
5992 ioc->max_sges_in_chain_message,
5993 ioc->shost->sg_tablesize,
5994 ioc->chains_needed_per_io);
5996 /* reply post queue, 16 byte align */
5997 reply_post_free_sz = ioc->reply_post_queue_depth *
5998 sizeof(Mpi2DefaultReplyDescriptor_t);
5999 rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
6000 if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
6001 || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
6002 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
6003 ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
6004 if (ret == -EAGAIN) {
6006 * Free allocated bad RDPQ memory pools.
6007 * Change dma coherent mask to 32 bit and reallocate RDPQ
6009 _base_release_memory_pools(ioc);
6010 ioc->use_32bit_dma = true;
6011 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6013 "32 DMA mask failed %s\n", pci_name(ioc->pdev));
6016 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
6018 } else if (ret == -ENOMEM)
6020 total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
6021 DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
6022 ioc->scsiio_depth = ioc->hba_queue_depth -
6023 ioc->hi_priority_depth - ioc->internal_depth;
6025 /* set the scsi host can_queue depth
6026 * with some internal commands that could be outstanding
6028 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
6030 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
6031 ioc->shost->can_queue));
6033 /* contiguous pool for request and chains, 16 byte align, one extra "
6036 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
6037 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
6039 /* hi-priority queue */
6040 sz += (ioc->hi_priority_depth * ioc->request_sz);
6042 /* internal queue */
6043 sz += (ioc->internal_depth * ioc->request_sz);
6045 ioc->request_dma_sz = sz;
6046 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
6047 &ioc->request_dma, GFP_KERNEL);
6048 if (!ioc->request) {
6049 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
6050 ioc->hba_queue_depth, ioc->chains_needed_per_io,
6051 ioc->request_sz, sz / 1024);
6052 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
6055 ioc->hba_queue_depth -= retry_sz;
6056 _base_release_memory_pools(ioc);
6057 goto retry_allocation;
6061 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
6062 ioc->hba_queue_depth, ioc->chains_needed_per_io,
6063 ioc->request_sz, sz / 1024);
6065 /* hi-priority queue */
6066 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
6068 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
6071 /* internal queue */
6072 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
6074 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
6078 "request pool(0x%p) - dma(0x%llx): "
6079 "depth(%d), frame_size(%d), pool_size(%d kB)\n",
6080 ioc->request, (unsigned long long) ioc->request_dma,
6081 ioc->hba_queue_depth, ioc->request_sz,
6082 (ioc->hba_queue_depth * ioc->request_sz) / 1024);
6087 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
6088 ioc->request, ioc->scsiio_depth));
6090 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
6091 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
6092 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
6093 if (!ioc->chain_lookup) {
6094 ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
6098 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
6099 for (i = 0; i < ioc->scsiio_depth; i++) {
6100 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
6101 if (!ioc->chain_lookup[i].chains_per_smid) {
6102 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6107 /* initialize hi-priority queue smid's */
6108 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
6109 sizeof(struct request_tracker), GFP_KERNEL);
6110 if (!ioc->hpr_lookup) {
6111 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
6114 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
6116 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
6118 ioc->hi_priority_depth, ioc->hi_priority_smid));
6120 /* initialize internal queue smid's */
6121 ioc->internal_lookup = kcalloc(ioc->internal_depth,
6122 sizeof(struct request_tracker), GFP_KERNEL);
6123 if (!ioc->internal_lookup) {
6124 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
6127 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
6129 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
6131 ioc->internal_depth, ioc->internal_smid));
6133 ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
6134 sizeof(u16), GFP_KERNEL);
6135 if (!ioc->io_queue_num)
6138 * The number of NVMe page sized blocks needed is:
6139 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
6140 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
6141 * that is placed in the main message frame. 8 is the size of each PRP
6142 * entry or PRP list pointer entry. 8 is subtracted from page_size
6143 * because of the PRP list pointer entry at the end of a page, so this
6144 * is not counted as a PRP entry. The 1 added page is a round up.
6146 * To avoid allocation failures due to the amount of memory that could
6147 * be required for NVMe PRP's, only each set of NVMe blocks will be
6148 * contiguous, so a new set is allocated for each possible I/O.
6151 ioc->chains_per_prp_buffer = 0;
6152 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
6153 nvme_blocks_needed =
6154 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
6155 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
6156 nvme_blocks_needed++;
6158 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
6159 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
6160 if (!ioc->pcie_sg_lookup) {
6161 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
6164 sz = nvme_blocks_needed * ioc->page_size;
6165 rc = _base_allocate_pcie_sgl_pool(ioc, sz);
6168 else if (rc == -EAGAIN)
6170 total_sz += sz * ioc->scsiio_depth;
6173 rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
6176 else if (rc == -EAGAIN)
6178 total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
6179 ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
6181 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
6182 ioc->chain_depth, ioc->chain_segment_sz,
6183 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
6184 /* sense buffers, 4 byte align */
6185 sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
6186 rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
6189 else if (rc == -EAGAIN)
6191 total_sz += sense_sz;
6193 "sense pool(0x%p)- dma(0x%llx): depth(%d),"
6194 "element_size(%d), pool_size(%d kB)\n",
6195 ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
6196 SCSI_SENSE_BUFFERSIZE, sz / 1024);
6197 /* reply pool, 4 byte align */
6198 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
6199 rc = _base_allocate_reply_pool(ioc, sz);
6202 else if (rc == -EAGAIN)
6206 /* reply free queue, 16 byte align */
6207 sz = ioc->reply_free_queue_depth * 4;
6208 rc = _base_allocate_reply_free_dma_pool(ioc, sz);
6211 else if (rc == -EAGAIN)
6214 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
6215 (unsigned long long)ioc->reply_free_dma));
6217 if (ioc->rdpq_array_enable) {
6218 reply_post_free_array_sz = ioc->reply_queue_count *
6219 sizeof(Mpi2IOCInitRDPQArrayEntry);
6220 rc = _base_allocate_reply_post_free_array(ioc,
6221 reply_post_free_array_sz);
6224 else if (rc == -EAGAIN)
6227 ioc->config_page_sz = 512;
6228 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
6229 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
6230 if (!ioc->config_page) {
6231 ioc_err(ioc, "config page: dma_pool_alloc failed\n");
6235 ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
6236 ioc->config_page, (unsigned long long)ioc->config_page_dma,
6237 ioc->config_page_sz);
6238 total_sz += ioc->config_page_sz;
6240 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
6242 ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
6243 ioc->shost->can_queue, facts->RequestCredit);
6244 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
6245 ioc->shost->sg_tablesize);
6249 _base_release_memory_pools(ioc);
6250 if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
6251 /* Change dma coherent mask to 32 bit and reallocate */
6252 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6253 pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
6254 pci_name(ioc->pdev));
6257 } else if (_base_reduce_hba_queue_depth(ioc) != 0)
6259 goto retry_allocation;
6266 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
6267 * @ioc: Pointer to MPT_ADAPTER structure
6268 * @cooked: Request raw or cooked IOC state
6270 * Return: all IOC Doorbell register bits if cooked==0, else just the
6271 * Doorbell bits in MPI_IOC_STATE_MASK.
6274 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
6278 s = ioc->base_readl(&ioc->chip->Doorbell);
6279 sc = s & MPI2_IOC_STATE_MASK;
6280 return cooked ? sc : s;
6284 * _base_wait_on_iocstate - waiting on a particular ioc state
6286 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
6287 * @timeout: timeout in second
6289 * Return: 0 for success, non-zero for failure.
6292 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
6298 cntdn = 1000 * timeout;
6300 current_state = mpt3sas_base_get_iocstate(ioc, 1);
6301 if (current_state == ioc_state)
6303 if (count && current_state == MPI2_IOC_STATE_FAULT)
6305 if (count && current_state == MPI2_IOC_STATE_COREDUMP)
6308 usleep_range(1000, 1500);
6312 return current_state;
6316 * _base_dump_reg_set - This function will print hexdump of register set.
6317 * @ioc: per adapter object
6322 _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
6324 unsigned int i, sz = 256;
6325 u32 __iomem *reg = (u32 __iomem *)ioc->chip;
6327 ioc_info(ioc, "System Register set:\n");
6328 for (i = 0; i < (sz / sizeof(u32)); i++)
6329 pr_info("%08x: %08x\n", (i * 4), readl(®[i]));
6333 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
6334 * a write to the doorbell)
6335 * @ioc: per adapter object
6336 * @timeout: timeout in seconds
6338 * Return: 0 for success, non-zero for failure.
6340 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
6344 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6350 cntdn = 1000 * timeout;
6352 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6353 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6355 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6356 __func__, count, timeout));
6360 usleep_range(1000, 1500);
6364 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6365 __func__, count, int_status);
6370 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6376 cntdn = 2000 * timeout;
6378 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6379 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6381 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6382 __func__, count, timeout));
6390 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6391 __func__, count, int_status);
6397 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
6398 * @ioc: per adapter object
6399 * @timeout: timeout in second
6401 * Return: 0 for success, non-zero for failure.
6403 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
6407 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
6414 cntdn = 1000 * timeout;
6416 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6417 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
6419 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6420 __func__, count, timeout));
6422 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6423 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
6424 if ((doorbell & MPI2_IOC_STATE_MASK) ==
6425 MPI2_IOC_STATE_FAULT) {
6426 mpt3sas_print_fault_code(ioc, doorbell);
6429 if ((doorbell & MPI2_IOC_STATE_MASK) ==
6430 MPI2_IOC_STATE_COREDUMP) {
6431 mpt3sas_print_coredump_info(ioc, doorbell);
6434 } else if (int_status == 0xFFFFFFFF)
6437 usleep_range(1000, 1500);
6442 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6443 __func__, count, int_status);
6448 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
6449 * @ioc: per adapter object
6450 * @timeout: timeout in second
6452 * Return: 0 for success, non-zero for failure.
6455 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
6461 cntdn = 1000 * timeout;
6463 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
6464 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
6466 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6467 __func__, count, timeout));
6471 usleep_range(1000, 1500);
6475 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
6476 __func__, count, doorbell_reg);
6481 * _base_send_ioc_reset - send doorbell reset
6482 * @ioc: per adapter object
6483 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
6484 * @timeout: timeout in second
6486 * Return: 0 for success, non-zero for failure.
6489 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
6493 unsigned long flags;
6495 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
6496 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
6500 if (!(ioc->facts.IOCCapabilities &
6501 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
6504 ioc_info(ioc, "sending message unit reset !!\n");
6506 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
6507 &ioc->chip->Doorbell);
6508 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
6513 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6515 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6516 __func__, ioc_state);
6522 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6523 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6525 * Wait for IOC state CoreDump to clear only during
6526 * HBA initialization & release time.
6528 if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6529 MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
6530 ioc->fault_reset_work_q == NULL)) {
6531 spin_unlock_irqrestore(
6532 &ioc->ioc_reset_in_progress_lock, flags);
6533 mpt3sas_print_coredump_info(ioc, ioc_state);
6534 mpt3sas_base_wait_for_coredump_completion(ioc,
6537 &ioc->ioc_reset_in_progress_lock, flags);
6539 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6541 ioc_info(ioc, "message unit reset: %s\n",
6542 r == 0 ? "SUCCESS" : "FAILED");
6547 * mpt3sas_wait_for_ioc - IOC's operational state is checked here.
6548 * @ioc: per adapter object
6549 * @timeout: timeout in seconds
6551 * Return: Waits up to timeout seconds for the IOC to
6552 * become operational. Returns 0 if IOC is present
6553 * and operational; otherwise returns %-EFAULT.
6557 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
6559 int wait_state_count = 0;
6563 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
6564 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
6568 * Watchdog thread will be started after IOC Initialization, so
6569 * no need to wait here for IOC state to become operational
6570 * when IOC Initialization is on. Instead the driver will
6571 * return ETIME status, so that calling function can issue
6572 * diag reset operation and retry the command.
6574 if (ioc->is_driver_loading)
6578 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
6579 __func__, ++wait_state_count);
6580 } while (--timeout);
6582 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
6585 if (wait_state_count)
6586 ioc_info(ioc, "ioc is operational\n");
6591 * _base_handshake_req_reply_wait - send request thru doorbell interface
6592 * @ioc: per adapter object
6593 * @request_bytes: request length
6594 * @request: pointer having request payload
6595 * @reply_bytes: reply length
6596 * @reply: pointer to reply payload
6597 * @timeout: timeout in second
6599 * Return: 0 for success, non-zero for failure.
6602 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
6603 u32 *request, int reply_bytes, u16 *reply, int timeout)
6605 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
6610 /* make sure doorbell is not in use */
6611 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
6612 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
6616 /* clear pending doorbell interrupts from previous state changes */
6617 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
6618 MPI2_HIS_IOC2SYS_DB_STATUS)
6619 writel(0, &ioc->chip->HostInterruptStatus);
6621 /* send message to ioc */
6622 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
6623 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
6624 &ioc->chip->Doorbell);
6626 if ((_base_spin_on_doorbell_int(ioc, 5))) {
6627 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6631 writel(0, &ioc->chip->HostInterruptStatus);
6633 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
6634 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
6639 /* send message 32-bits at a time */
6640 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
6641 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
6642 if ((_base_wait_for_doorbell_ack(ioc, 5)))
6647 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
6652 /* now wait for the reply */
6653 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
6654 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6659 /* read the first two 16-bits, it gives the total length of the reply */
6660 reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
6661 & MPI2_DOORBELL_DATA_MASK);
6662 writel(0, &ioc->chip->HostInterruptStatus);
6663 if ((_base_wait_for_doorbell_int(ioc, 5))) {
6664 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6668 reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
6669 & MPI2_DOORBELL_DATA_MASK);
6670 writel(0, &ioc->chip->HostInterruptStatus);
6672 for (i = 2; i < default_reply->MsgLength * 2; i++) {
6673 if ((_base_wait_for_doorbell_int(ioc, 5))) {
6674 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6678 if (i >= reply_bytes/2) /* overflow case */
6679 ioc->base_readl(&ioc->chip->Doorbell);
6681 reply[i] = le16_to_cpu(
6682 ioc->base_readl(&ioc->chip->Doorbell)
6683 & MPI2_DOORBELL_DATA_MASK);
6684 writel(0, &ioc->chip->HostInterruptStatus);
6687 _base_wait_for_doorbell_int(ioc, 5);
6688 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
6690 ioc_info(ioc, "doorbell is in use (line=%d)\n",
6693 writel(0, &ioc->chip->HostInterruptStatus);
6695 if (ioc->logging_level & MPT_DEBUG_INIT) {
6696 mfp = (__le32 *)reply;
6697 pr_info("\toffset:data\n");
6698 for (i = 0; i < reply_bytes/4; i++)
6699 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6700 le32_to_cpu(mfp[i]));
6706 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
6707 * @ioc: per adapter object
6708 * @mpi_reply: the reply payload from FW
6709 * @mpi_request: the request payload sent to FW
6711 * The SAS IO Unit Control Request message allows the host to perform low-level
6712 * operations, such as resets on the PHYs of the IO Unit, also allows the host
6713 * to obtain the IOC assigned device handles for a device if it has other
6714 * identifying information about the device, in addition allows the host to
6715 * remove IOC resources associated with the device.
6717 * Return: 0 for success, non-zero for failure.
6720 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
6721 Mpi2SasIoUnitControlReply_t *mpi_reply,
6722 Mpi2SasIoUnitControlRequest_t *mpi_request)
6729 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6731 mutex_lock(&ioc->base_cmds.mutex);
6733 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6734 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6739 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6743 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6745 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6751 ioc->base_cmds.status = MPT3_CMD_PENDING;
6752 request = mpt3sas_base_get_msg_frame(ioc, smid);
6753 ioc->base_cmds.smid = smid;
6754 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
6755 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6756 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
6757 ioc->ioc_link_reset_in_progress = 1;
6758 init_completion(&ioc->base_cmds.done);
6759 ioc->put_smid_default(ioc, smid);
6760 wait_for_completion_timeout(&ioc->base_cmds.done,
6761 msecs_to_jiffies(10000));
6762 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6763 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
6764 ioc->ioc_link_reset_in_progress)
6765 ioc->ioc_link_reset_in_progress = 0;
6766 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6767 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
6768 mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
6770 goto issue_host_reset;
6772 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6773 memcpy(mpi_reply, ioc->base_cmds.reply,
6774 sizeof(Mpi2SasIoUnitControlReply_t));
6776 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
6777 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6782 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6783 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6786 mutex_unlock(&ioc->base_cmds.mutex);
6791 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
6792 * @ioc: per adapter object
6793 * @mpi_reply: the reply payload from FW
6794 * @mpi_request: the request payload sent to FW
6796 * The SCSI Enclosure Processor request message causes the IOC to
6797 * communicate with SES devices to control LED status signals.
6799 * Return: 0 for success, non-zero for failure.
6802 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
6803 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
6810 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6812 mutex_lock(&ioc->base_cmds.mutex);
6814 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6815 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6820 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6824 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6826 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6832 ioc->base_cmds.status = MPT3_CMD_PENDING;
6833 request = mpt3sas_base_get_msg_frame(ioc, smid);
6834 ioc->base_cmds.smid = smid;
6835 memset(request, 0, ioc->request_sz);
6836 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
6837 init_completion(&ioc->base_cmds.done);
6838 ioc->put_smid_default(ioc, smid);
6839 wait_for_completion_timeout(&ioc->base_cmds.done,
6840 msecs_to_jiffies(10000));
6841 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6842 mpt3sas_check_cmd_timeout(ioc,
6843 ioc->base_cmds.status, mpi_request,
6844 sizeof(Mpi2SepRequest_t)/4, issue_reset);
6845 goto issue_host_reset;
6847 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6848 memcpy(mpi_reply, ioc->base_cmds.reply,
6849 sizeof(Mpi2SepReply_t));
6851 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
6852 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6857 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6858 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6861 mutex_unlock(&ioc->base_cmds.mutex);
6866 * _base_get_port_facts - obtain port facts reply and save in ioc
6867 * @ioc: per adapter object
6870 * Return: 0 for success, non-zero for failure.
6873 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
6875 Mpi2PortFactsRequest_t mpi_request;
6876 Mpi2PortFactsReply_t mpi_reply;
6877 struct mpt3sas_port_facts *pfacts;
6878 int mpi_reply_sz, mpi_request_sz, r;
6880 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6882 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
6883 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
6884 memset(&mpi_request, 0, mpi_request_sz);
6885 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
6886 mpi_request.PortNumber = port;
6887 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6888 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6891 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6895 pfacts = &ioc->pfacts[port];
6896 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
6897 pfacts->PortNumber = mpi_reply.PortNumber;
6898 pfacts->VP_ID = mpi_reply.VP_ID;
6899 pfacts->VF_ID = mpi_reply.VF_ID;
6900 pfacts->MaxPostedCmdBuffers =
6901 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
6907 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
6908 * @ioc: per adapter object
6911 * Return: 0 for success, non-zero for failure.
6914 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
6919 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6921 if (ioc->pci_error_recovery) {
6923 ioc_info(ioc, "%s: host in pci error recovery\n",
6928 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6930 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6931 __func__, ioc_state));
6933 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
6934 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6937 if (ioc_state & MPI2_DOORBELL_USED) {
6938 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6939 goto issue_diag_reset;
6942 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6943 mpt3sas_print_fault_code(ioc, ioc_state &
6944 MPI2_DOORBELL_DATA_MASK);
6945 goto issue_diag_reset;
6946 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6947 MPI2_IOC_STATE_COREDUMP) {
6949 "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
6950 __func__, ioc_state);
6954 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6957 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6958 __func__, ioc_state));
6963 rc = _base_diag_reset(ioc);
6968 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
6969 * @ioc: per adapter object
6971 * Return: 0 for success, non-zero for failure.
6974 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
6976 Mpi2IOCFactsRequest_t mpi_request;
6977 Mpi2IOCFactsReply_t mpi_reply;
6978 struct mpt3sas_facts *facts;
6979 int mpi_reply_sz, mpi_request_sz, r;
6981 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6983 r = _base_wait_for_iocstate(ioc, 10);
6986 ioc_info(ioc, "%s: failed getting to correct state\n",
6990 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
6991 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
6992 memset(&mpi_request, 0, mpi_request_sz);
6993 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
6994 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6995 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6998 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7002 facts = &ioc->facts;
7003 memset(facts, 0, sizeof(struct mpt3sas_facts));
7004 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
7005 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
7006 facts->VP_ID = mpi_reply.VP_ID;
7007 facts->VF_ID = mpi_reply.VF_ID;
7008 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
7009 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
7010 facts->WhoInit = mpi_reply.WhoInit;
7011 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
7012 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
7013 if (ioc->msix_enable && (facts->MaxMSIxVectors <=
7014 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
7015 ioc->combined_reply_queue = 0;
7016 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
7017 facts->MaxReplyDescriptorPostQueueDepth =
7018 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
7019 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
7020 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
7021 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
7022 ioc->ir_firmware = 1;
7023 if ((facts->IOCCapabilities &
7024 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
7025 ioc->rdpq_array_capable = 1;
7026 if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
7027 && ioc->is_aero_ioc)
7028 ioc->atomic_desc_capable = 1;
7029 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
7030 facts->IOCRequestFrameSize =
7031 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
7032 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7033 facts->IOCMaxChainSegmentSize =
7034 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
7036 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
7037 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
7038 ioc->shost->max_id = -1;
7039 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
7040 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
7041 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
7042 facts->HighPriorityCredit =
7043 le16_to_cpu(mpi_reply.HighPriorityCredit);
7044 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
7045 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
7046 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
7049 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
7051 ioc->page_size = 1 << facts->CurrentHostPageSize;
7052 if (ioc->page_size == 1) {
7053 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
7054 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
7057 ioc_info(ioc, "CurrentHostPageSize(%d)\n",
7058 facts->CurrentHostPageSize));
7061 ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
7062 facts->RequestCredit, facts->MaxChainDepth));
7064 ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
7065 facts->IOCRequestFrameSize * 4,
7066 facts->ReplyFrameSize * 4));
7071 * _base_send_ioc_init - send ioc_init to firmware
7072 * @ioc: per adapter object
7074 * Return: 0 for success, non-zero for failure.
7077 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
7079 Mpi2IOCInitRequest_t mpi_request;
7080 Mpi2IOCInitReply_t mpi_reply;
7082 ktime_t current_time;
7084 u32 reply_post_free_array_sz = 0;
7086 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7088 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
7089 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
7090 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
7091 mpi_request.VF_ID = 0; /* TODO */
7092 mpi_request.VP_ID = 0;
7093 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
7094 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
7095 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
7097 if (_base_is_controller_msix_enabled(ioc))
7098 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
7099 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
7100 mpi_request.ReplyDescriptorPostQueueDepth =
7101 cpu_to_le16(ioc->reply_post_queue_depth);
7102 mpi_request.ReplyFreeQueueDepth =
7103 cpu_to_le16(ioc->reply_free_queue_depth);
7105 mpi_request.SenseBufferAddressHigh =
7106 cpu_to_le32((u64)ioc->sense_dma >> 32);
7107 mpi_request.SystemReplyAddressHigh =
7108 cpu_to_le32((u64)ioc->reply_dma >> 32);
7109 mpi_request.SystemRequestFrameBaseAddress =
7110 cpu_to_le64((u64)ioc->request_dma);
7111 mpi_request.ReplyFreeQueueAddress =
7112 cpu_to_le64((u64)ioc->reply_free_dma);
7114 if (ioc->rdpq_array_enable) {
7115 reply_post_free_array_sz = ioc->reply_queue_count *
7116 sizeof(Mpi2IOCInitRDPQArrayEntry);
7117 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
7118 for (i = 0; i < ioc->reply_queue_count; i++)
7119 ioc->reply_post_free_array[i].RDPQBaseAddress =
7121 (u64)ioc->reply_post[i].reply_post_free_dma);
7122 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
7123 mpi_request.ReplyDescriptorPostQueueAddress =
7124 cpu_to_le64((u64)ioc->reply_post_free_array_dma);
7126 mpi_request.ReplyDescriptorPostQueueAddress =
7127 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
7131 * Set the flag to enable CoreDump state feature in IOC firmware.
7133 mpi_request.ConfigurationFlags |=
7134 cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
7136 /* This time stamp specifies number of milliseconds
7137 * since epoch ~ midnight January 1, 1970.
7139 current_time = ktime_get_real();
7140 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
7142 if (ioc->logging_level & MPT_DEBUG_INIT) {
7146 mfp = (__le32 *)&mpi_request;
7147 ioc_info(ioc, "\toffset:data\n");
7148 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
7149 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
7150 le32_to_cpu(mfp[i]));
7153 r = _base_handshake_req_reply_wait(ioc,
7154 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
7155 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
7158 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7162 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7163 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
7164 mpi_reply.IOCLogInfo) {
7165 ioc_err(ioc, "%s: failed\n", __func__);
7169 /* Reset TimeSync Counter*/
7170 ioc->timestamp_update_count = 0;
7175 * mpt3sas_port_enable_done - command completion routine for port enable
7176 * @ioc: per adapter object
7177 * @smid: system request message index
7178 * @msix_index: MSIX table index supplied by the OS
7179 * @reply: reply message frame(lower 32bit addr)
7181 * Return: 1 meaning mf should be freed from _base_interrupt
7182 * 0 means the mf is freed from this function.
7185 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
7188 MPI2DefaultReply_t *mpi_reply;
7191 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
7194 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7198 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
7201 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
7202 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
7203 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
7204 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
7205 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7206 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7207 ioc->port_enable_failed = 1;
7209 if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
7210 ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
7211 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
7212 mpt3sas_port_enable_complete(ioc);
7215 ioc->start_scan_failed = ioc_status;
7216 ioc->start_scan = 0;
7220 complete(&ioc->port_enable_cmds.done);
7225 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
7226 * @ioc: per adapter object
7228 * Return: 0 for success, non-zero for failure.
7231 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
7233 Mpi2PortEnableRequest_t *mpi_request;
7234 Mpi2PortEnableReply_t *mpi_reply;
7239 ioc_info(ioc, "sending port enable !!\n");
7241 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7242 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7246 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7248 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7252 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7253 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7254 ioc->port_enable_cmds.smid = smid;
7255 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7256 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7258 init_completion(&ioc->port_enable_cmds.done);
7259 ioc->put_smid_default(ioc, smid);
7260 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
7261 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
7262 ioc_err(ioc, "%s: timeout\n", __func__);
7263 _debug_dump_mf(mpi_request,
7264 sizeof(Mpi2PortEnableRequest_t)/4);
7265 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
7272 mpi_reply = ioc->port_enable_cmds.reply;
7273 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7274 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7275 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
7276 __func__, ioc_status);
7282 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7283 ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
7288 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
7289 * @ioc: per adapter object
7291 * Return: 0 for success, non-zero for failure.
7294 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
7296 Mpi2PortEnableRequest_t *mpi_request;
7299 ioc_info(ioc, "sending port enable !!\n");
7301 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7302 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7306 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7308 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7311 ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
7312 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7313 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
7314 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7315 ioc->port_enable_cmds.smid = smid;
7316 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7317 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7319 ioc->put_smid_default(ioc, smid);
7324 * _base_determine_wait_on_discovery - desposition
7325 * @ioc: per adapter object
7327 * Decide whether to wait on discovery to complete. Used to either
7328 * locate boot device, or report volumes ahead of physical devices.
7330 * Return: 1 for wait, 0 for don't wait.
7333 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
7335 /* We wait for discovery to complete if IR firmware is loaded.
7336 * The sas topology events arrive before PD events, so we need time to
7337 * turn on the bit in ioc->pd_handles to indicate PD
7338 * Also, it maybe required to report Volumes ahead of physical
7339 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
7341 if (ioc->ir_firmware)
7344 /* if no Bios, then we don't need to wait */
7345 if (!ioc->bios_pg3.BiosVersion)
7348 /* Bios is present, then we drop down here.
7350 * If there any entries in the Bios Page 2, then we wait
7351 * for discovery to complete.
7354 /* Current Boot Device */
7355 if ((ioc->bios_pg2.CurrentBootDeviceForm &
7356 MPI2_BIOSPAGE2_FORM_MASK) ==
7357 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7358 /* Request Boot Device */
7359 (ioc->bios_pg2.ReqBootDeviceForm &
7360 MPI2_BIOSPAGE2_FORM_MASK) ==
7361 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7362 /* Alternate Request Boot Device */
7363 (ioc->bios_pg2.ReqAltBootDeviceForm &
7364 MPI2_BIOSPAGE2_FORM_MASK) ==
7365 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
7372 * _base_unmask_events - turn on notification for this event
7373 * @ioc: per adapter object
7374 * @event: firmware event
7376 * The mask is stored in ioc->event_masks.
7379 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
7386 desired_event = (1 << (event % 32));
7389 ioc->event_masks[0] &= ~desired_event;
7390 else if (event < 64)
7391 ioc->event_masks[1] &= ~desired_event;
7392 else if (event < 96)
7393 ioc->event_masks[2] &= ~desired_event;
7394 else if (event < 128)
7395 ioc->event_masks[3] &= ~desired_event;
7399 * _base_event_notification - send event notification
7400 * @ioc: per adapter object
7402 * Return: 0 for success, non-zero for failure.
7405 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
7407 Mpi2EventNotificationRequest_t *mpi_request;
7410 int i, issue_diag_reset = 0;
7412 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7414 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7415 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7419 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7421 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7424 ioc->base_cmds.status = MPT3_CMD_PENDING;
7425 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7426 ioc->base_cmds.smid = smid;
7427 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
7428 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
7429 mpi_request->VF_ID = 0; /* TODO */
7430 mpi_request->VP_ID = 0;
7431 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
7432 mpi_request->EventMasks[i] =
7433 cpu_to_le32(ioc->event_masks[i]);
7434 init_completion(&ioc->base_cmds.done);
7435 ioc->put_smid_default(ioc, smid);
7436 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
7437 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7438 ioc_err(ioc, "%s: timeout\n", __func__);
7439 _debug_dump_mf(mpi_request,
7440 sizeof(Mpi2EventNotificationRequest_t)/4);
7441 if (ioc->base_cmds.status & MPT3_CMD_RESET)
7444 issue_diag_reset = 1;
7447 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
7448 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7450 if (issue_diag_reset) {
7451 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
7453 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
7461 * mpt3sas_base_validate_event_type - validating event types
7462 * @ioc: per adapter object
7463 * @event_type: firmware event
7465 * This will turn on firmware event notification when application
7466 * ask for that event. We don't mask events that are already enabled.
7469 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
7472 u32 event_mask, desired_event;
7473 u8 send_update_to_fw;
7475 for (i = 0, send_update_to_fw = 0; i <
7476 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
7477 event_mask = ~event_type[i];
7479 for (j = 0; j < 32; j++) {
7480 if (!(event_mask & desired_event) &&
7481 (ioc->event_masks[i] & desired_event)) {
7482 ioc->event_masks[i] &= ~desired_event;
7483 send_update_to_fw = 1;
7485 desired_event = (desired_event << 1);
7489 if (!send_update_to_fw)
7492 mutex_lock(&ioc->base_cmds.mutex);
7493 _base_event_notification(ioc);
7494 mutex_unlock(&ioc->base_cmds.mutex);
7498 * _base_diag_reset - the "big hammer" start of day reset
7499 * @ioc: per adapter object
7501 * Return: 0 for success, non-zero for failure.
7504 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
7506 u32 host_diagnostic;
7511 ioc_info(ioc, "sending diag reset !!\n");
7513 pci_cfg_access_lock(ioc->pdev);
7515 drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
7519 /* Write magic sequence to WriteSequence register
7520 * Loop until in diagnostic mode
7522 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
7523 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7524 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
7525 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
7526 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
7527 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
7528 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
7529 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
7536 "Stop writing magic sequence after 20 retries\n");
7537 _base_dump_reg_set(ioc);
7541 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
7543 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
7544 count, host_diagnostic));
7546 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
7548 hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
7550 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
7551 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
7552 &ioc->chip->HostDiagnostic);
7554 /*This delay allows the chip PCIe hardware time to finish reset tasks*/
7555 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
7557 /* Approximately 300 second max wait */
7558 for (count = 0; count < (300000000 /
7559 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
7561 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
7563 if (host_diagnostic == 0xFFFFFFFF) {
7565 "Invalid host diagnostic register value\n");
7566 _base_dump_reg_set(ioc);
7569 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
7572 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
7575 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
7578 ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
7579 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
7580 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
7581 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
7583 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
7584 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
7585 &ioc->chip->HCBSize);
7588 drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
7589 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
7590 &ioc->chip->HostDiagnostic);
7593 ioc_info(ioc, "disable writes to the diagnostic register\n"));
7594 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7596 drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
7597 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
7599 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7600 __func__, ioc_state);
7601 _base_dump_reg_set(ioc);
7605 pci_cfg_access_unlock(ioc->pdev);
7606 ioc_info(ioc, "diag reset: SUCCESS\n");
7610 pci_cfg_access_unlock(ioc->pdev);
7611 ioc_err(ioc, "diag reset: FAILED\n");
7616 * _base_make_ioc_ready - put controller in READY state
7617 * @ioc: per adapter object
7618 * @type: FORCE_BIG_HAMMER or SOFT_RESET
7620 * Return: 0 for success, non-zero for failure.
7623 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
7629 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7631 if (ioc->pci_error_recovery)
7634 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7636 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
7637 __func__, ioc_state));
7639 /* if in RESET state, it should move to READY state shortly */
7641 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
7642 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
7643 MPI2_IOC_STATE_READY) {
7644 if (count++ == 10) {
7645 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7646 __func__, ioc_state);
7650 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7654 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
7657 if (ioc_state & MPI2_DOORBELL_USED) {
7658 ioc_info(ioc, "unexpected doorbell active!\n");
7659 goto issue_diag_reset;
7662 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
7663 mpt3sas_print_fault_code(ioc, ioc_state &
7664 MPI2_DOORBELL_DATA_MASK);
7665 goto issue_diag_reset;
7668 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
7670 * if host reset is invoked while watch dog thread is waiting
7671 * for IOC state to be changed to Fault state then driver has
7672 * to wait here for CoreDump state to clear otherwise reset
7673 * will be issued to the FW and FW move the IOC state to
7674 * reset state without copying the FW logs to coredump region.
7676 if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
7677 mpt3sas_print_coredump_info(ioc, ioc_state &
7678 MPI2_DOORBELL_DATA_MASK);
7679 mpt3sas_base_wait_for_coredump_completion(ioc,
7682 goto issue_diag_reset;
7685 if (type == FORCE_BIG_HAMMER)
7686 goto issue_diag_reset;
7688 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
7689 if (!(_base_send_ioc_reset(ioc,
7690 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
7695 rc = _base_diag_reset(ioc);
7700 * _base_make_ioc_operational - put controller in OPERATIONAL state
7701 * @ioc: per adapter object
7703 * Return: 0 for success, non-zero for failure.
7706 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
7708 int r, i, index, rc;
7709 unsigned long flags;
7712 struct _tr_list *delayed_tr, *delayed_tr_next;
7713 struct _sc_list *delayed_sc, *delayed_sc_next;
7714 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
7716 struct adapter_reply_queue *reply_q;
7717 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
7719 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7721 /* clean the delayed target reset list */
7722 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
7723 &ioc->delayed_tr_list, list) {
7724 list_del(&delayed_tr->list);
7729 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
7730 &ioc->delayed_tr_volume_list, list) {
7731 list_del(&delayed_tr->list);
7735 list_for_each_entry_safe(delayed_sc, delayed_sc_next,
7736 &ioc->delayed_sc_list, list) {
7737 list_del(&delayed_sc->list);
7741 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
7742 &ioc->delayed_event_ack_list, list) {
7743 list_del(&delayed_event_ack->list);
7744 kfree(delayed_event_ack);
7747 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7749 /* hi-priority queue */
7750 INIT_LIST_HEAD(&ioc->hpr_free_list);
7751 smid = ioc->hi_priority_smid;
7752 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
7753 ioc->hpr_lookup[i].cb_idx = 0xFF;
7754 ioc->hpr_lookup[i].smid = smid;
7755 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
7756 &ioc->hpr_free_list);
7759 /* internal queue */
7760 INIT_LIST_HEAD(&ioc->internal_free_list);
7761 smid = ioc->internal_smid;
7762 for (i = 0; i < ioc->internal_depth; i++, smid++) {
7763 ioc->internal_lookup[i].cb_idx = 0xFF;
7764 ioc->internal_lookup[i].smid = smid;
7765 list_add_tail(&ioc->internal_lookup[i].tracker_list,
7766 &ioc->internal_free_list);
7769 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7771 /* initialize Reply Free Queue */
7772 for (i = 0, reply_address = (u32)ioc->reply_dma ;
7773 i < ioc->reply_free_queue_depth ; i++, reply_address +=
7775 ioc->reply_free[i] = cpu_to_le32(reply_address);
7776 if (ioc->is_mcpu_endpoint)
7777 _base_clone_reply_to_sys_mem(ioc,
7781 /* initialize reply queues */
7782 if (ioc->is_driver_loading)
7783 _base_assign_reply_queues(ioc);
7785 /* initialize Reply Post Free Queue */
7787 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
7788 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
7790 * If RDPQ is enabled, switch to the next allocation.
7791 * Otherwise advance within the contiguous region.
7793 if (ioc->rdpq_array_enable) {
7794 reply_q->reply_post_free =
7795 ioc->reply_post[index++].reply_post_free;
7797 reply_q->reply_post_free = reply_post_free_contig;
7798 reply_post_free_contig += ioc->reply_post_queue_depth;
7801 reply_q->reply_post_host_index = 0;
7802 for (i = 0; i < ioc->reply_post_queue_depth; i++)
7803 reply_q->reply_post_free[i].Words =
7804 cpu_to_le64(ULLONG_MAX);
7805 if (!_base_is_controller_msix_enabled(ioc))
7806 goto skip_init_reply_post_free_queue;
7808 skip_init_reply_post_free_queue:
7810 r = _base_send_ioc_init(ioc);
7813 * No need to check IOC state for fault state & issue
7814 * diag reset during host reset. This check is need
7815 * only during driver load time.
7817 if (!ioc->is_driver_loading)
7820 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
7821 if (rc || (_base_send_ioc_init(ioc)))
7825 /* initialize reply free host index */
7826 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
7827 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
7829 /* initialize reply post host index */
7830 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
7831 if (ioc->combined_reply_queue)
7832 writel((reply_q->msix_index & 7)<<
7833 MPI2_RPHI_MSIX_INDEX_SHIFT,
7834 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
7836 writel(reply_q->msix_index <<
7837 MPI2_RPHI_MSIX_INDEX_SHIFT,
7838 &ioc->chip->ReplyPostHostIndex);
7840 if (!_base_is_controller_msix_enabled(ioc))
7841 goto skip_init_reply_post_host_index;
7844 skip_init_reply_post_host_index:
7846 mpt3sas_base_unmask_interrupts(ioc);
7848 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7849 r = _base_display_fwpkg_version(ioc);
7854 rc = _base_static_config_pages(ioc);
7858 r = _base_event_notification(ioc);
7862 if (!ioc->shost_recovery) {
7864 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
7867 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
7868 MFG_PAGE10_HIDE_SSDS_MASK);
7869 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
7870 ioc->mfg_pg10_hide_flag = hide_flag;
7873 ioc->wait_for_discovery_to_complete =
7874 _base_determine_wait_on_discovery(ioc);
7876 return r; /* scan_start and scan_finished support */
7879 r = _base_send_port_enable(ioc);
7887 * mpt3sas_base_free_resources - free resources controller resources
7888 * @ioc: per adapter object
7891 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
7893 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7895 /* synchronizing freeing resource with pci_access_mutex lock */
7896 mutex_lock(&ioc->pci_access_mutex);
7897 if (ioc->chip_phys && ioc->chip) {
7898 mpt3sas_base_mask_interrupts(ioc);
7899 ioc->shost_recovery = 1;
7900 _base_make_ioc_ready(ioc, SOFT_RESET);
7901 ioc->shost_recovery = 0;
7904 mpt3sas_base_unmap_resources(ioc);
7905 mutex_unlock(&ioc->pci_access_mutex);
7910 * mpt3sas_base_attach - attach controller instance
7911 * @ioc: per adapter object
7913 * Return: 0 for success, non-zero for failure.
7916 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
7919 int cpu_id, last_cpu_id = 0;
7921 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7923 /* setup cpu_msix_table */
7924 ioc->cpu_count = num_online_cpus();
7925 for_each_online_cpu(cpu_id)
7926 last_cpu_id = cpu_id;
7927 ioc->cpu_msix_table_sz = last_cpu_id + 1;
7928 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
7929 ioc->reply_queue_count = 1;
7930 if (!ioc->cpu_msix_table) {
7931 ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
7933 goto out_free_resources;
7936 if (ioc->is_warpdrive) {
7937 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
7938 sizeof(resource_size_t *), GFP_KERNEL);
7939 if (!ioc->reply_post_host_index) {
7940 ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
7942 goto out_free_resources;
7946 ioc->smp_affinity_enable = smp_affinity_enable;
7948 ioc->rdpq_array_enable_assigned = 0;
7949 ioc->use_32bit_dma = false;
7951 if (ioc->is_aero_ioc)
7952 ioc->base_readl = &_base_readl_aero;
7954 ioc->base_readl = &_base_readl;
7955 r = mpt3sas_base_map_resources(ioc);
7957 goto out_free_resources;
7959 pci_set_drvdata(ioc->pdev, ioc->shost);
7960 r = _base_get_ioc_facts(ioc);
7962 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
7963 if (rc || (_base_get_ioc_facts(ioc)))
7964 goto out_free_resources;
7967 switch (ioc->hba_mpi_version_belonged) {
7969 ioc->build_sg_scmd = &_base_build_sg_scmd;
7970 ioc->build_sg = &_base_build_sg;
7971 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
7972 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7978 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
7979 * Target Status - all require the IEEE formatted scatter gather
7982 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
7983 ioc->build_sg = &_base_build_sg_ieee;
7984 ioc->build_nvme_prp = &_base_build_nvme_prp;
7985 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
7986 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
7987 if (ioc->high_iops_queues)
7988 ioc->get_msix_index_for_smlio =
7989 &_base_get_high_iops_msix_index;
7991 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7994 if (ioc->atomic_desc_capable) {
7995 ioc->put_smid_default = &_base_put_smid_default_atomic;
7996 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
7997 ioc->put_smid_fast_path =
7998 &_base_put_smid_fast_path_atomic;
7999 ioc->put_smid_hi_priority =
8000 &_base_put_smid_hi_priority_atomic;
8002 ioc->put_smid_default = &_base_put_smid_default;
8003 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
8004 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
8005 if (ioc->is_mcpu_endpoint)
8006 ioc->put_smid_scsi_io =
8007 &_base_put_smid_mpi_ep_scsi_io;
8009 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
8012 * These function pointers for other requests that don't
8013 * the require IEEE scatter gather elements.
8015 * For example Configuration Pages and SAS IOUNIT Control don't.
8017 ioc->build_sg_mpi = &_base_build_sg;
8018 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
8020 r = _base_make_ioc_ready(ioc, SOFT_RESET);
8022 goto out_free_resources;
8024 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
8025 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
8028 goto out_free_resources;
8031 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
8032 r = _base_get_port_facts(ioc, i);
8034 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8035 if (rc || (_base_get_port_facts(ioc, i)))
8036 goto out_free_resources;
8040 r = _base_allocate_memory_pools(ioc);
8042 goto out_free_resources;
8044 if (irqpoll_weight > 0)
8045 ioc->thresh_hold = irqpoll_weight;
8047 ioc->thresh_hold = ioc->hba_queue_depth/4;
8049 _base_init_irqpolls(ioc);
8050 init_waitqueue_head(&ioc->reset_wq);
8052 /* allocate memory pd handle bitmask list */
8053 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8054 if (ioc->facts.MaxDevHandle % 8)
8055 ioc->pd_handles_sz++;
8056 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
8058 if (!ioc->pd_handles) {
8060 goto out_free_resources;
8062 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
8064 if (!ioc->blocking_handles) {
8066 goto out_free_resources;
8069 /* allocate memory for pending OS device add list */
8070 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
8071 if (ioc->facts.MaxDevHandle % 8)
8072 ioc->pend_os_device_add_sz++;
8073 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
8075 if (!ioc->pend_os_device_add) {
8077 goto out_free_resources;
8080 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
8081 ioc->device_remove_in_progress =
8082 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
8083 if (!ioc->device_remove_in_progress) {
8085 goto out_free_resources;
8088 ioc->fwfault_debug = mpt3sas_fwfault_debug;
8090 /* base internal command bits */
8091 mutex_init(&ioc->base_cmds.mutex);
8092 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8093 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
8095 /* port_enable command bits */
8096 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8097 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
8099 /* transport internal command bits */
8100 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8101 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
8102 mutex_init(&ioc->transport_cmds.mutex);
8104 /* scsih internal command bits */
8105 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8106 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8107 mutex_init(&ioc->scsih_cmds.mutex);
8109 /* task management internal command bits */
8110 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8111 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
8112 mutex_init(&ioc->tm_cmds.mutex);
8114 /* config page internal command bits */
8115 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8116 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
8117 mutex_init(&ioc->config_cmds.mutex);
8119 /* ctl module internal command bits */
8120 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8121 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
8122 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
8123 mutex_init(&ioc->ctl_cmds.mutex);
8125 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
8126 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
8127 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
8128 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
8130 goto out_free_resources;
8133 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
8134 ioc->event_masks[i] = -1;
8136 /* here we enable the events we care about */
8137 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
8138 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
8139 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
8140 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
8141 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
8142 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
8143 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
8144 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
8145 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
8146 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
8147 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
8148 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
8149 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
8150 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
8151 if (ioc->is_gen35_ioc) {
8152 _base_unmask_events(ioc,
8153 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
8154 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
8155 _base_unmask_events(ioc,
8156 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
8159 r = _base_make_ioc_operational(ioc);
8161 r = _base_make_ioc_operational(ioc);
8163 goto out_free_resources;
8167 * Copy current copy of IOCFacts in prev_fw_facts
8168 * and it will be used during online firmware upgrade.
8170 memcpy(&ioc->prev_fw_facts, &ioc->facts,
8171 sizeof(struct mpt3sas_facts));
8173 ioc->non_operational_loop = 0;
8174 ioc->ioc_coredump_loop = 0;
8175 ioc->got_task_abort_from_ioctl = 0;
8180 ioc->remove_host = 1;
8182 mpt3sas_base_free_resources(ioc);
8183 _base_release_memory_pools(ioc);
8184 pci_set_drvdata(ioc->pdev, NULL);
8185 kfree(ioc->cpu_msix_table);
8186 if (ioc->is_warpdrive)
8187 kfree(ioc->reply_post_host_index);
8188 kfree(ioc->pd_handles);
8189 kfree(ioc->blocking_handles);
8190 kfree(ioc->device_remove_in_progress);
8191 kfree(ioc->pend_os_device_add);
8192 kfree(ioc->tm_cmds.reply);
8193 kfree(ioc->transport_cmds.reply);
8194 kfree(ioc->scsih_cmds.reply);
8195 kfree(ioc->config_cmds.reply);
8196 kfree(ioc->base_cmds.reply);
8197 kfree(ioc->port_enable_cmds.reply);
8198 kfree(ioc->ctl_cmds.reply);
8199 kfree(ioc->ctl_cmds.sense);
8201 ioc->ctl_cmds.reply = NULL;
8202 ioc->base_cmds.reply = NULL;
8203 ioc->tm_cmds.reply = NULL;
8204 ioc->scsih_cmds.reply = NULL;
8205 ioc->transport_cmds.reply = NULL;
8206 ioc->config_cmds.reply = NULL;
8213 * mpt3sas_base_detach - remove controller instance
8214 * @ioc: per adapter object
8217 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
8219 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8221 mpt3sas_base_stop_watchdog(ioc);
8222 mpt3sas_base_free_resources(ioc);
8223 _base_release_memory_pools(ioc);
8224 mpt3sas_free_enclosure_list(ioc);
8225 pci_set_drvdata(ioc->pdev, NULL);
8226 kfree(ioc->cpu_msix_table);
8227 if (ioc->is_warpdrive)
8228 kfree(ioc->reply_post_host_index);
8229 kfree(ioc->pd_handles);
8230 kfree(ioc->blocking_handles);
8231 kfree(ioc->device_remove_in_progress);
8232 kfree(ioc->pend_os_device_add);
8234 kfree(ioc->ctl_cmds.reply);
8235 kfree(ioc->ctl_cmds.sense);
8236 kfree(ioc->base_cmds.reply);
8237 kfree(ioc->port_enable_cmds.reply);
8238 kfree(ioc->tm_cmds.reply);
8239 kfree(ioc->transport_cmds.reply);
8240 kfree(ioc->scsih_cmds.reply);
8241 kfree(ioc->config_cmds.reply);
8245 * _base_pre_reset_handler - pre reset handler
8246 * @ioc: per adapter object
8248 static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
8250 mpt3sas_scsih_pre_reset_handler(ioc);
8251 mpt3sas_ctl_pre_reset_handler(ioc);
8252 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
8256 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
8257 * @ioc: per adapter object
8260 _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
8263 ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
8264 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
8265 ioc->transport_cmds.status |= MPT3_CMD_RESET;
8266 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
8267 complete(&ioc->transport_cmds.done);
8269 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
8270 ioc->base_cmds.status |= MPT3_CMD_RESET;
8271 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
8272 complete(&ioc->base_cmds.done);
8274 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
8275 ioc->port_enable_failed = 1;
8276 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
8277 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
8278 if (ioc->is_driver_loading) {
8279 ioc->start_scan_failed =
8280 MPI2_IOCSTATUS_INTERNAL_ERROR;
8281 ioc->start_scan = 0;
8283 complete(&ioc->port_enable_cmds.done);
8286 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
8287 ioc->config_cmds.status |= MPT3_CMD_RESET;
8288 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
8289 ioc->config_cmds.smid = USHRT_MAX;
8290 complete(&ioc->config_cmds.done);
8295 * _base_clear_outstanding_commands - clear all outstanding commands
8296 * @ioc: per adapter object
8298 static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
8300 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
8301 mpt3sas_ctl_clear_outstanding_ioctls(ioc);
8302 _base_clear_outstanding_mpt_commands(ioc);
8306 * _base_reset_done_handler - reset done handler
8307 * @ioc: per adapter object
8309 static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
8311 mpt3sas_scsih_reset_done_handler(ioc);
8312 mpt3sas_ctl_reset_done_handler(ioc);
8313 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
8317 * mpt3sas_wait_for_commands_to_complete - reset controller
8318 * @ioc: Pointer to MPT_ADAPTER structure
8320 * This function is waiting 10s for all pending commands to complete
8321 * prior to putting controller in reset.
8324 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
8328 ioc->pending_io_count = 0;
8330 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8331 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
8334 /* pending command count */
8335 ioc->pending_io_count = scsi_host_busy(ioc->shost);
8337 if (!ioc->pending_io_count)
8340 /* wait for pending commands to complete */
8341 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
8345 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
8346 * attributes during online firmware upgrade and update the corresponding
8347 * IOC variables accordingly.
8349 * @ioc: Pointer to MPT_ADAPTER structure
8352 _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
8355 void *pd_handles = NULL, *blocking_handles = NULL;
8356 void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
8357 struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
8359 if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
8360 pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8361 if (ioc->facts.MaxDevHandle % 8)
8364 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
8368 "Unable to allocate the memory for pd_handles of sz: %d\n",
8372 memset(pd_handles + ioc->pd_handles_sz, 0,
8373 (pd_handles_sz - ioc->pd_handles_sz));
8374 ioc->pd_handles = pd_handles;
8376 blocking_handles = krealloc(ioc->blocking_handles,
8377 pd_handles_sz, GFP_KERNEL);
8378 if (!blocking_handles) {
8380 "Unable to allocate the memory for "
8381 "blocking_handles of sz: %d\n",
8385 memset(blocking_handles + ioc->pd_handles_sz, 0,
8386 (pd_handles_sz - ioc->pd_handles_sz));
8387 ioc->blocking_handles = blocking_handles;
8388 ioc->pd_handles_sz = pd_handles_sz;
8390 pend_os_device_add = krealloc(ioc->pend_os_device_add,
8391 pd_handles_sz, GFP_KERNEL);
8392 if (!pend_os_device_add) {
8394 "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
8398 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
8399 (pd_handles_sz - ioc->pend_os_device_add_sz));
8400 ioc->pend_os_device_add = pend_os_device_add;
8401 ioc->pend_os_device_add_sz = pd_handles_sz;
8403 device_remove_in_progress = krealloc(
8404 ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
8405 if (!device_remove_in_progress) {
8407 "Unable to allocate the memory for "
8408 "device_remove_in_progress of sz: %d\n "
8412 memset(device_remove_in_progress +
8413 ioc->device_remove_in_progress_sz, 0,
8414 (pd_handles_sz - ioc->device_remove_in_progress_sz));
8415 ioc->device_remove_in_progress = device_remove_in_progress;
8416 ioc->device_remove_in_progress_sz = pd_handles_sz;
8419 memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
8424 * mpt3sas_base_hard_reset_handler - reset controller
8425 * @ioc: Pointer to MPT_ADAPTER structure
8426 * @type: FORCE_BIG_HAMMER or SOFT_RESET
8428 * Return: 0 for success, non-zero for failure.
8431 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
8432 enum reset_type type)
8435 unsigned long flags;
8437 u8 is_fault = 0, is_trigger = 0;
8439 dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
8441 if (ioc->pci_error_recovery) {
8442 ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
8447 if (mpt3sas_fwfault_debug)
8448 mpt3sas_halt_firmware(ioc);
8450 /* wait for an active reset in progress to complete */
8451 mutex_lock(&ioc->reset_in_progress_mutex);
8453 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8454 ioc->shost_recovery = 1;
8455 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8457 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8458 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
8459 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8460 MPT3_DIAG_BUFFER_IS_RELEASED))) {
8462 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8463 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
8464 (ioc_state & MPI2_IOC_STATE_MASK) ==
8465 MPI2_IOC_STATE_COREDUMP) {
8467 ioc->htb_rel.trigger_info_dwords[1] =
8468 (ioc_state & MPI2_DOORBELL_DATA_MASK);
8471 _base_pre_reset_handler(ioc);
8472 mpt3sas_wait_for_commands_to_complete(ioc);
8473 mpt3sas_base_mask_interrupts(ioc);
8474 r = _base_make_ioc_ready(ioc, type);
8477 _base_clear_outstanding_commands(ioc);
8479 /* If this hard reset is called while port enable is active, then
8480 * there is no reason to call make_ioc_operational
8482 if (ioc->is_driver_loading && ioc->port_enable_failed) {
8483 ioc->remove_host = 1;
8487 r = _base_get_ioc_facts(ioc);
8491 r = _base_check_ioc_facts_changes(ioc);
8494 "Some of the parameters got changed in this new firmware"
8495 " image and it requires system reboot\n");
8498 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
8499 panic("%s: Issue occurred with flashing controller firmware."
8500 "Please reboot the system and ensure that the correct"
8501 " firmware version is running\n", ioc->name);
8503 r = _base_make_ioc_operational(ioc);
8505 _base_reset_done_handler(ioc);
8508 ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
8510 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8511 ioc->shost_recovery = 0;
8512 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8513 ioc->ioc_reset_count++;
8514 mutex_unlock(&ioc->reset_in_progress_mutex);
8517 if ((r == 0) && is_trigger) {
8519 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
8521 mpt3sas_trigger_master(ioc,
8522 MASTER_TRIGGER_ADAPTER_RESET);
8524 dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));