/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>        /* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"
static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];

#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH    30000
#define MAX_CHAIN_DEPTH        100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
    "irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
    " enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
    "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
    "0 - balanced: high iops mode is enabled &\n\t\t"
    "interrupt coalescing is enabled only on high iops queues,\n\t\t"
    "1 - iops: high iops mode is disabled &\n\t\t"
    "interrupt coalescing is enabled on all queues,\n\t\t"
    "2 - latency: high iops mode is disabled &\n\t\t"
    "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
    "\t\tdefault - default perf_mode is 'balanced'");
enum mpt3sas_perf_mode {
    MPT_PERF_MODE_DEFAULT = -1,
    MPT_PERF_MODE_BALANCED = 0,
    MPT_PERF_MODE_IOPS = 1,
    MPT_PERF_MODE_LATENCY = 2,
};

static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
    u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
/**
 * mpt3sas_base_check_cmd_timeout - Function to check timeout
 * and command termination due to Host reset.
 *
 * @ioc: per adapter object.
 * @status: Status of issued command.
 * @mpi_request: mf request pointer.
 * @sz: size of buffer.
 *
 * Return: 1 if a host reset should be issued, 0 otherwise.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
    u8 status, void *mpi_request, int sz)
{
    u8 issue_reset = 0;

    if (!(status & MPT3_CMD_RESET))
        issue_reset = 1;

    ioc_err(ioc, "Command %s\n",
        issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
    _debug_dump_mf(mpi_request, sz);

    return issue_reset;
}
/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
    int ret = param_set_int(val, kp);
    struct MPT3SAS_ADAPTER *ioc;

    if (ret)
        return ret;

    /* global ioc spinlock to protect controller list on list operations */
    pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
    spin_lock(&gioc_lock);
    list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
        ioc->fwfault_debug = mpt3sas_fwfault_debug;
    spin_unlock(&gioc_lock);
    return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
    param_get_int, &mpt3sas_fwfault_debug, 0644);
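/*
 * Usage example (illustrative): because the parameter is registered via
 * module_param_call() with mode 0644, fwfault_debug can be flipped at
 * runtime for every active controller, e.g.:
 *
 *     echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * The setter above then propagates the new value to each IOC on
 * mpt3sas_ioc_list under gioc_lock.
 */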
/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() for max three times if it gets zero value
 * while reading the system interface register.
 */
static u32
_base_readl_aero(const volatile void __iomem *addr)
{
    u32 i = 0, ret_val;

    do {
        ret_val = readl(addr);
        i++;
    } while (ret_val == 0 && i < 3);

    return ret_val;
}

static u32
_base_readl(const volatile void __iomem *addr)
{
    return readl(addr);
}
/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *                                in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame (lower 32-bit address)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
    u32 index)
{
    /*
     * The first 256 bytes of BAR0 are system registers; MPI frames start
     * at offset 256. A maximum of 32 MPI frames is supported, so
     * 32 * 128 = 4K. From here the clone of the reply free pool for the
     * mCPU starts.
     */
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
        MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) + (index * sizeof(u32));

    writel(reply, reply_free_iomem);
}
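/*
 * Worked example (illustrative), using the mCPU layout described in
 * _clone_sg_entries() below (maxCredit 32, request_sz 128): cmd_credit
 * is 32, so the reply free pool starts at BAR0 offset 256 + 32 * 128 =
 * 4352, and entry 'index' is written at 4352 + index * 4, i.e. inside
 * the 4352-4864 reply free region of that layout.
 */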
/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *                              to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
    int i;
    u32 *src_virt_mem = (u32 *)src;

    for (i = 0; i < size/4; i++)
        writel((u32)src_virt_mem[i],
            (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
    int i;
    u32 *src_virt_mem = (u32 *)(src);

    for (i = 0; i < size/4; i++)
        writel((u32)src_virt_mem[i],
            (void __iomem *)dst_iomem + (i * 4));
}
/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *                   for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    u8 sge_chain_count)
{
    void __iomem *base_chain, *chain_virt;
    u16 cmd_credit = ioc->facts.RequestCredit + 1;

    base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) +
        REPLY_FREE_POOL_SIZE;
    chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
    return chain_virt;
}

/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *                        in BAR0 for scatter gather chains, for
 *                        the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    u8 sge_chain_count)
{
    phys_addr_t base_chain_phys, chain_phys;
    u16 cmd_credit = ioc->facts.RequestCredit + 1;

    base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) +
        REPLY_FREE_POOL_SIZE;
    chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
    return chain_phys;
}
/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *                         buffer address for the provided smid.
 *                         (Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    /* Added extra 1 to reach end of chain. */
    void __iomem *chain_end = _base_get_chain(ioc,
        cmd_credit + 1,
        ioc->facts.MaxChainDepth);
    return chain_end + (smid * 64 * 1024);
}
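/*
 * Worked example (illustrative), using the BAR0 layout sketched in
 * _clone_sg_entries() below (maxCredit 32, request_sz 128,
 * REPLY_FREE_POOL_SIZE 512, MaxChainDepth 3): base_chain sits at
 * 256 + 32 * 128 + 512 = 4864, and the chain slot for a given
 * smid/sge_chain_count pair is at 4864 + (smid * 3 + sge_chain_count)
 * * 128. Past the chain region each smid then owns a 64K host buffer,
 * returned here as chain_end + smid * 64 * 1024.
 */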
/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *                              Host buffer Physical address for the
 *                              provided smid.
 *                              (Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Physical address of buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
        cmd_credit + 1,
        ioc->facts.MaxChainDepth);
    return chain_end_phys + (smid * 64 * 1024);
}
/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates the chain
 *                        lookup list and provides the chain_buffer
 *                        address for the matching dma address.
 *                        (Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
    dma_addr_t chain_buffer_dma)
{
    u16 index, j;
    struct chain_tracker *ct;

    for (index = 0; index < ioc->scsiio_depth; index++) {
        for (j = 0; j < ioc->chains_needed_per_io; j++) {
            ct = &ioc->chain_lookup[index].chains_per_smid[j];
            if (ct && ct->chain_buffer_dma == chain_buffer_dma)
                return ct->chain_buffer;
        }
    }
    ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
    return NULL;
}
/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *                     are handled here. Base function for
 *                     double buffering, before submitting
 *                     the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
    void *mpi_request, u16 smid)
{
    Mpi2SGESimple32_t *sgel, *sgel_next;
    u32 sgl_flags, sge_chain_count = 0;
    bool is_write = false;
    u16 i = 0;
    void __iomem *buffer_iomem;
    phys_addr_t buffer_iomem_phys;
    void __iomem *buff_ptr;
    phys_addr_t buff_ptr_phys;
    void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
    void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
    phys_addr_t dst_addr_phys;
    MPI2RequestHeader_t *request_hdr;
    struct scsi_cmnd *scmd;
    struct scatterlist *sg_scmd = NULL;
    int is_scsiio_req = 0;

    request_hdr = (MPI2RequestHeader_t *) mpi_request;

    if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
        Mpi25SCSIIORequest_t *scsiio_request =
            (Mpi25SCSIIORequest_t *)mpi_request;
        sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
        is_scsiio_req = 1;
    } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
        Mpi2ConfigRequest_t *config_req =
            (Mpi2ConfigRequest_t *)mpi_request;
        sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
    } else
        return;

    /* From smid we can get scsi_cmd, once we have sg_scmd,
     * we just need to get sg_virt and sg_next to get the virtual
     * address associated with sgel->Address.
     */

    if (is_scsiio_req) {
        /* Get scsi_cmd using smid */
        scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
        if (scmd == NULL) {
            ioc_err(ioc, "scmd is NULL\n");
            return;
        }

        /* Get sg_scmd from scmd provided */
        sg_scmd = scsi_sglist(scmd);
    }

    /*
     * 0 - 255      System register
     * 256 - 4352   MPI Frame. (This is based on maxCredit 32)
     * 4352 - 4864  Reply_free pool (512 byte is reserved
     *              considering maxCredit 32. Reply need extra
     *              room, for mCPU case kept four times of
     *              maxCredit).
     * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
     *              128 byte size = 12288)
     * 17152 - x    Host buffer mapped with smid.
     *              (Each smid can have 64K Max IO.)
     * BAR0+Last 1K MSIX Addr and Data
     * Total size in use 2113664 bytes of 4MB BAR0
     */

    buffer_iomem = _base_get_buffer_bar0(ioc, smid);
    buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

    buff_ptr = buffer_iomem;
    buff_ptr_phys = buffer_iomem_phys;
    WARN_ON(buff_ptr_phys > U32_MAX);

    if (le32_to_cpu(sgel->FlagsLength) &
        (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
        is_write = true;

    for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

        sgl_flags =
            (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

        switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
        case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
            /*
             * Helper function which on passing
             * chain_buffer_dma returns chain_buffer. Get
             * the virtual address for sgel->Address
             */
            sgel_next =
                _base_get_chain_buffer_dma_to_chain_buffer(ioc,
                    le32_to_cpu(sgel->Address));
            if (sgel_next == NULL)
                return;
            /*
             * This is copying a 128-byte chain
             * frame (not a host buffer)
             */
            dst_chain_addr[sge_chain_count] =
                _base_get_chain(ioc,
                    smid, sge_chain_count);
            src_chain_addr[sge_chain_count] =
                (void *) sgel_next;
            dst_addr_phys = _base_get_chain_phys(ioc,
                smid, sge_chain_count);
            WARN_ON(dst_addr_phys > U32_MAX);
            sgel->Address =
                cpu_to_le32(lower_32_bits(dst_addr_phys));
            sgel = sgel_next;
            sge_chain_count++;
            break;
        case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
            if (is_write) {
                if (is_scsiio_req) {
                    _base_clone_to_sys_mem(buff_ptr,
                        sg_virt(sg_scmd),
                        (le32_to_cpu(sgel->FlagsLength) &
                        0x00ffffff));
                    /*
                     * FIXME: this relies on a zero
                     * PCI mem_offset.
                     */
                    sgel->Address =
                        cpu_to_le32((u32)buff_ptr_phys);
                } else {
                    _base_clone_to_sys_mem(buff_ptr,
                        ioc->config_vaddr,
                        (le32_to_cpu(sgel->FlagsLength) &
                        0x00ffffff));
                    sgel->Address =
                        cpu_to_le32((u32)buff_ptr_phys);
                }
            }
            buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
                0x00ffffff);
            buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
                0x00ffffff);
            if ((le32_to_cpu(sgel->FlagsLength) &
                (MPI2_SGE_FLAGS_END_OF_BUFFER
                << MPI2_SGE_FLAGS_SHIFT)))
                goto eob_clone_chain;
            else {
                /*
                 * Every single element in MPT will have an
                 * associated sg_next. Better to sanity-check
                 * that sg_next is not NULL, but it will be a
                 * bug if it is null.
                 */
                if (is_scsiio_req) {
                    sg_scmd = sg_next(sg_scmd);
                    if (sg_scmd)
                        sgel++;
                    else
                        goto eob_clone_chain;
                }
            }
            break;
        }
    }

eob_clone_chain:
    for (i = 0; i < sge_chain_count; i++) {
        if (is_scsiio_req)
            _base_clone_to_sys_mem(dst_chain_addr[i],
                src_chain_addr[i], ioc->request_sz);
    }
}
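/*
 * Flow summary (illustrative): for a WRITE, each simple SGE's payload is
 * copied from host memory into the smid's BAR0 host buffer and the SGE
 * address is patched to the lower 32 bits of the BAR0 physical address;
 * chain SGEs are recorded as they are walked, and the 128-byte chain
 * frames themselves are cloned into their BAR0 chain slots at
 * eob_clone_chain above.
 */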
/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
    struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
    struct pci_dev *pdev;

    if (!ioc)
        return -1;

    pdev = ioc->pdev;
    if (!pdev)
        return -1;
    pci_stop_and_remove_bus_device_locked(pdev);
    return 0;
}
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
    struct MPT3SAS_ADAPTER *ioc =
        container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
    unsigned long flags;
    u32 doorbell;
    int rc;
    struct task_struct *p;


    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
        ioc->pci_error_recovery)
        goto rearm_timer;
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

    doorbell = mpt3sas_base_get_iocstate(ioc, 0);
    if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
        ioc_err(ioc, "SAS host is non-operational !!!!\n");

        /* It may be possible that EEH recovery can resolve some of
         * the pci bus failure issues rather than removing the dead
         * ioc function by considering the controller to be in a
         * non-operational state. So here priority is given to EEH
         * recovery. If it doesn't resolve this issue, the mpt3sas
         * driver will consider this controller non-operational and
         * remove the dead ioc function.
         */
        if (ioc->non_operational_loop++ < 5) {
            spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                flags);
            goto rearm_timer;
        }

        /*
         * Call _scsih_flush_pending_cmds callback so that we flush all
         * pending commands back to OS. This call is required to avoid
         * deadlock at block layer. Dead IOC will fail to do diag reset,
         * and this call is safe since dead ioc will never return any
         * command back from HW.
         */
        ioc->schedule_dead_ioc_flush_running_cmds(ioc);
        /*
         * Set remove_host flag early since kernel thread will
         * take some time to execute.
         */
        ioc->remove_host = 1;
        /* Remove the dead host */
        p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
            "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
        if (IS_ERR(p))
            ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                __func__);
        else
            ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                __func__);
        return; /* don't rearm timer */
    }

    if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
        u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
            ioc->manu_pg11.CoreDumpTOSec :
            MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

        timeout /= (FAULT_POLLING_INTERVAL/1000);

        if (ioc->ioc_coredump_loop == 0) {
            mpt3sas_print_coredump_info(ioc,
                doorbell & MPI2_DOORBELL_DATA_MASK);
            /* do not accept any IOs and disable the interrupts */
            spin_lock_irqsave(
                &ioc->ioc_reset_in_progress_lock, flags);
            ioc->shost_recovery = 1;
            spin_unlock_irqrestore(
                &ioc->ioc_reset_in_progress_lock, flags);
            _base_mask_interrupts(ioc);
            _base_clear_outstanding_commands(ioc);
        }

        ioc_info(ioc, "%s: CoreDump loop %d.",
            __func__, ioc->ioc_coredump_loop);

        /* Wait until CoreDump completes or times out */
        if (ioc->ioc_coredump_loop++ < timeout) {
            spin_lock_irqsave(
                &ioc->ioc_reset_in_progress_lock, flags);
            goto rearm_timer;
        }
    }

    if (ioc->ioc_coredump_loop) {
        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
            ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
                __func__, ioc->ioc_coredump_loop);
        else
            ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
                __func__, ioc->ioc_coredump_loop);
        ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
    }

    ioc->non_operational_loop = 0;
    if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
        rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        ioc_warn(ioc, "%s: hard reset: %s\n",
            __func__, rc == 0 ? "success" : "failed");
        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
            mpt3sas_print_fault_code(ioc, doorbell &
                MPI2_DOORBELL_DATA_MASK);
        } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
            MPI2_IOC_STATE_COREDUMP)
            mpt3sas_print_coredump_info(ioc, doorbell &
                MPI2_DOORBELL_DATA_MASK);
        if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
            MPI2_IOC_STATE_OPERATIONAL)
            return; /* don't rearm timer */
    }
    ioc->ioc_coredump_loop = 0;

    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
    if (ioc->fault_reset_work_q)
        queue_delayed_work(ioc->fault_reset_work_q,
            &ioc->fault_reset_work,
            msecs_to_jiffies(FAULT_POLLING_INTERVAL));
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
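/*
 * Timeout arithmetic (illustrative): the CoreDump wait is expressed in
 * polling loops. With a hypothetical manu_pg11.CoreDumpTOSec of 15 and
 * FAULT_POLLING_INTERVAL of 1000 ms, timeout /= (1000/1000) leaves 15,
 * so the work item re-arms itself up to 15 times (roughly 15 seconds)
 * before declaring the CoreDump timed out and forcing a hard reset.
 */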
/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
    unsigned long flags;

    if (ioc->fault_reset_work_q)
        return;

    /* initialize fault polling */

    INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
    snprintf(ioc->fault_reset_work_q_name,
        sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
        ioc->driver_name, ioc->id);
    ioc->fault_reset_work_q =
        create_singlethread_workqueue(ioc->fault_reset_work_q_name);
    if (!ioc->fault_reset_work_q) {
        ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
        return;
    }
    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    if (ioc->fault_reset_work_q)
        queue_delayed_work(ioc->fault_reset_work_q,
            &ioc->fault_reset_work,
            msecs_to_jiffies(FAULT_POLLING_INTERVAL));
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
    unsigned long flags;
    struct workqueue_struct *wq;

    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    wq = ioc->fault_reset_work_q;
    ioc->fault_reset_work_q = NULL;
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
    if (wq) {
        if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
            flush_workqueue(wq);
        destroy_workqueue(wq);
    }
}
/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
    ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return: nothing.
 */
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
    ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}
/**
 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
 *        completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
    const char *caller)
{
    u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
        ioc->manu_pg11.CoreDumpTOSec :
        MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

    int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
        timeout);

    if (ioc_state)
        ioc_err(ioc,
            "%s: CoreDump timed out. (ioc_state=0x%x)\n",
            caller, ioc_state);
    else
        ioc_info(ioc,
            "%s: CoreDump completed. (ioc_state=0x%x)\n",
            caller, ioc_state);

    return ioc_state;
}
/**
 * mpt3sas_halt_firmware - halts the controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. The
 * intent is to stop both driver and firmware so the end user can
 * obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
    u32 doorbell;

    if (!ioc->fwfault_debug)
        return;

    dump_stack();

    doorbell = ioc->base_readl(&ioc->chip->Doorbell);
    if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
        mpt3sas_print_fault_code(ioc, doorbell &
            MPI2_DOORBELL_DATA_MASK);
    } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
        MPI2_IOC_STATE_COREDUMP) {
        mpt3sas_print_coredump_info(ioc, doorbell &
            MPI2_DOORBELL_DATA_MASK);
    } else {
        writel(0xC0FFEE00, &ioc->chip->Doorbell);
        ioc_err(ioc, "Firmware is halted due to command timeout\n");
    }

    if (ioc->fwfault_debug == 2)
        for (;;)
            ;
    else
        panic("panic in %s\n", __func__);
}
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
    MPI2RequestHeader_t *request_hdr)
{
    u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
        MPI2_IOCSTATUS_MASK;
    char *desc = NULL;
    u16 frame_sz;
    char *func_str = NULL;

    /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
    if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
        request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
        request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
        return;

    if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
        return;

    switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

    case MPI2_IOCSTATUS_INVALID_FUNCTION:
        desc = "invalid function";
        break;
    case MPI2_IOCSTATUS_BUSY:
        desc = "busy";
        break;
    case MPI2_IOCSTATUS_INVALID_SGL:
        desc = "invalid sgl";
        break;
    case MPI2_IOCSTATUS_INTERNAL_ERROR:
        desc = "internal error";
        break;
    case MPI2_IOCSTATUS_INVALID_VPID:
        desc = "invalid vpid";
        break;
    case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
        desc = "insufficient resources";
        break;
    case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
        desc = "insufficient power";
        break;
    case MPI2_IOCSTATUS_INVALID_FIELD:
        desc = "invalid field";
        break;
    case MPI2_IOCSTATUS_INVALID_STATE:
        desc = "invalid state";
        break;
    case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
        desc = "op state not supported";
        break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

    case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
        desc = "config invalid action";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
        desc = "config invalid type";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
        desc = "config invalid page";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
        desc = "config invalid data";
        break;
    case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
        desc = "config no defaults";
        break;
    case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
        desc = "config can't commit";
        break;

/****************************************************************************
*  SCSI IOCStatus values
****************************************************************************/

    case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
    case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
    case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
    case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
    case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
    case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
    case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
    case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
    case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
    case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
    case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
    case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
        break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

    case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
        desc = "eedp guard error";
        break;
    case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
        desc = "eedp ref tag error";
        break;
    case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
        desc = "eedp app tag error";
        break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

    case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
        desc = "target invalid io index";
        break;
    case MPI2_IOCSTATUS_TARGET_ABORTED:
        desc = "target aborted";
        break;
    case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
        desc = "target no conn retryable";
        break;
    case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
        desc = "target no connection";
        break;
    case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
        desc = "target xfer count mismatch";
        break;
    case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
        desc = "target data offset error";
        break;
    case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
        desc = "target too much write data";
        break;
    case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
        desc = "target iu too short";
        break;
    case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
        desc = "target ack nak timeout";
        break;
    case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
        desc = "target nak received";
        break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

    case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
        desc = "smp request failed";
        break;
    case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
        desc = "smp data overrun";
        break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

    case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
        desc = "diagnostic released";
        break;
    default:
        break;
    }

    if (!desc)
        return;

    switch (request_hdr->Function) {
    case MPI2_FUNCTION_CONFIG:
        frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
        func_str = "config_page";
        break;
    case MPI2_FUNCTION_SCSI_TASK_MGMT:
        frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
        func_str = "task_mgmt";
        break;
    case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
        frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
        func_str = "sas_iounit_ctl";
        break;
    case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
        frame_sz = sizeof(Mpi2SepRequest_t);
        func_str = "enclosure";
        break;
    case MPI2_FUNCTION_IOC_INIT:
        frame_sz = sizeof(Mpi2IOCInitRequest_t);
        func_str = "ioc_init";
        break;
    case MPI2_FUNCTION_PORT_ENABLE:
        frame_sz = sizeof(Mpi2PortEnableRequest_t);
        func_str = "port_enable";
        break;
    case MPI2_FUNCTION_SMP_PASSTHROUGH:
        frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
        func_str = "smp_passthru";
        break;
    case MPI2_FUNCTION_NVME_ENCAPSULATED:
        frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
            ioc->sge_size;
        func_str = "nvme_encapsulated";
        break;
    default:
        frame_sz = 32;
        func_str = "unknown";
        break;
    }

    ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
        desc, ioc_status, request_hdr, func_str);

    _debug_dump_mf(request_hdr, frame_sz/4);
}
/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
    Mpi2EventNotificationReply_t *mpi_reply)
{
    char *desc = NULL;
    u16 event;

    if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
        return;

    event = le16_to_cpu(mpi_reply->Event);

    switch (event) {
    case MPI2_EVENT_LOG_DATA:
        desc = "Log Data";
        break;
    case MPI2_EVENT_STATE_CHANGE:
        desc = "Status Change";
        break;
    case MPI2_EVENT_HARD_RESET_RECEIVED:
        desc = "Hard Reset Received";
        break;
    case MPI2_EVENT_EVENT_CHANGE:
        desc = "Event Change";
        break;
    case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
        desc = "Device Status Change";
        break;
    case MPI2_EVENT_IR_OPERATION_STATUS:
        if (!ioc->hide_ir_msg)
            desc = "IR Operation Status";
        break;
    case MPI2_EVENT_SAS_DISCOVERY:
    {
        Mpi2EventDataSasDiscovery_t *event_data =
            (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;

        ioc_info(ioc, "Discovery: (%s)",
            event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
            "start" : "stop");
        if (event_data->DiscoveryStatus)
            pr_cont(" discovery_status(0x%08x)",
                le32_to_cpu(event_data->DiscoveryStatus));
        pr_cont("\n");
        return;
    }
    case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
        desc = "SAS Broadcast Primitive";
        break;
    case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
        desc = "SAS Init Device Status Change";
        break;
    case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
        desc = "SAS Init Table Overflow";
        break;
    case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
        desc = "SAS Topology Change List";
        break;
    case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
        desc = "SAS Enclosure Device Status Change";
        break;
    case MPI2_EVENT_IR_VOLUME:
        if (!ioc->hide_ir_msg)
            desc = "IR Volume";
        break;
    case MPI2_EVENT_IR_PHYSICAL_DISK:
        if (!ioc->hide_ir_msg)
            desc = "IR Physical Disk";
        break;
    case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
        if (!ioc->hide_ir_msg)
            desc = "IR Configuration Change List";
        break;
    case MPI2_EVENT_LOG_ENTRY_ADDED:
        if (!ioc->hide_ir_msg)
            desc = "Log Entry Added";
        break;
    case MPI2_EVENT_TEMP_THRESHOLD:
        desc = "Temperature Threshold";
        break;
    case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
        desc = "Cable Event";
        break;
    case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
        desc = "SAS Device Discovery Error";
        break;
    case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
        desc = "PCIE Device Status Change";
        break;
    case MPI2_EVENT_PCIE_ENUMERATION:
    {
        Mpi26EventDataPCIeEnumeration_t *event_data =
            (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;

        ioc_info(ioc, "PCIE Enumeration: (%s)",
            event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
            "started" : "completed");
        if (event_data->EnumerationStatus)
            pr_cont("enumeration_status(0x%08x)",
                le32_to_cpu(event_data->EnumerationStatus));
        pr_cont("\n");
        return;
    }
    case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
        desc = "PCIE Topology Change List";
        break;
    }

    if (!desc)
        return;

    ioc_info(ioc, "%s\n", desc);
}
/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
    union loginfo_type {
        u32 loginfo;
        struct {
            u32 subcode:16;
            u32 code:8;
            u32 originator:4;
            u32 bus_type:4;
        } dw;
    };
    union loginfo_type sas_loginfo;
    char *originator_str = NULL;

    sas_loginfo.loginfo = log_info;
    if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
        return;

    /* each nexus loss loginfo */
    if (log_info == 0x31170000)
        return;

    /* eat the loginfos associated with task aborts */
    if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
        0x31140000 || log_info == 0x31130000))
        return;

    switch (sas_loginfo.dw.originator) {
    case 0:
        originator_str = "IOP";
        break;
    case 1:
        originator_str = "PL";
        break;
    case 2:
        if (!ioc->hide_ir_msg)
            originator_str = "IR";
        else
            originator_str = "WarpDrive";
        break;
    }

    ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
        log_info,
        originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
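/*
 * Decode example (illustrative): the nexus-loss loginfo 0x31170000 that is
 * filtered above splits, under the bitfield layout of union loginfo_type,
 * into bus_type = 0x3 (SAS), originator = 0x1 ("PL"), code = 0x17 and
 * subcode = 0x0000.
 */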
/**
 * _base_display_reply_info - show ioc_status and log info for a reply
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32-bit address)
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    MPI2DefaultReply_t *mpi_reply;
    u16 ioc_status;
    u32 loginfo = 0;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (unlikely(!mpi_reply)) {
        ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
            __FILE__, __LINE__, __func__);
        return;
    }
    ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

    if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
        (ioc->logging_level & MPT_DEBUG_REPLY)) {
        _base_sas_ioc_info(ioc, mpi_reply,
            mpt3sas_base_get_msg_frame(ioc, smid));
    }

    if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
        loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
        _base_sas_log_info(ioc, loginfo);
    }

    if (ioc_status || loginfo) {
        ioc_status &= MPI2_IOCSTATUS_MASK;
        mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
    }
}
/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32-bit address)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    MPI2DefaultReply_t *mpi_reply;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
        return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

    if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
        return 1;

    ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
    if (mpi_reply) {
        ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
        memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
    }
    ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

    complete(&ioc->base_cmds.done);
    return 1;
}
/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32-bit address)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
    Mpi2EventNotificationReply_t *mpi_reply;
    Mpi2EventAckRequest_t *ack_request;
    u16 smid;
    struct _event_ack_list *delayed_event_ack;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (!mpi_reply)
        return 1;
    if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
        return 1;

    _base_display_event_data(ioc, mpi_reply);

    if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
        goto out;
    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
            GFP_ATOMIC);
        if (!delayed_event_ack)
            goto out;
        INIT_LIST_HEAD(&delayed_event_ack->list);
        delayed_event_ack->Event = mpi_reply->Event;
        delayed_event_ack->EventContext = mpi_reply->EventContext;
        list_add_tail(&delayed_event_ack->list,
            &ioc->delayed_event_ack_list);
        dewtprintk(ioc,
            ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
                le16_to_cpu(mpi_reply->Event)));
        goto out;
    }

    ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
    memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
    ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
    ack_request->Event = mpi_reply->Event;
    ack_request->EventContext = mpi_reply->EventContext;
    ack_request->VF_ID = 0;  /* TODO */
    ack_request->VP_ID = 0;
    ioc->put_smid_default(ioc, smid);

 out:

    /* scsih callback handler */
    mpt3sas_scsih_event_callback(ioc, msix_index, reply);

    /* ctl callback handler */
    mpt3sas_ctl_event_callback(ioc, msix_index, reply);

    return 1;
}
static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    struct scsi_cmnd *cmd;

    if (WARN_ON(!smid) ||
        WARN_ON(smid >= ioc->hi_priority_smid))
        return NULL;

    cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
    if (cmd)
        return scsi_cmd_priv(cmd);

    return NULL;
}
/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    int i;
    u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
    u8 cb_idx = 0xFF;

    if (smid < ioc->hi_priority_smid) {
        struct scsiio_tracker *st;

        if (smid < ctl_smid) {
            st = _get_st_from_smid(ioc, smid);
            if (st)
                cb_idx = st->cb_idx;
        } else if (smid == ctl_smid)
            cb_idx = ioc->ctl_cb_idx;
    } else if (smid < ioc->internal_smid) {
        i = smid - ioc->hi_priority_smid;
        cb_idx = ioc->hpr_lookup[i].cb_idx;
    } else if (smid <= ioc->hba_queue_depth) {
        i = smid - ioc->internal_smid;
        cb_idx = ioc->internal_lookup[i].cb_idx;
    }
    return cb_idx;
}
/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
    u32 him_register;

    ioc->mask_interrupts = 1;
    him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
    him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
    writel(him_register, &ioc->chip->HostInterruptMask);
    ioc->base_readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
    u32 him_register;

    him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
    him_register &= ~MPI2_HIM_RIM;
    writel(him_register, &ioc->chip->HostInterruptMask);
    ioc->mask_interrupts = 0;
}
union reply_descriptor {
    u64 word;
    struct {
        u32 low;
        u32 high;
    } u;
};

static u32 base_mod64(u64 dividend, u32 divisor)
{
    u32 remainder;

    if (!divisor)
        pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
    remainder = do_div(dividend, divisor);
    return remainder;
}
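/*
 * Example (illustrative): base_mod64(10, 4) == 2. do_div() is used instead
 * of the C '%' operator because 64-bit division/modulo on 32-bit
 * architectures would otherwise pull in compiler helper routines the
 * kernel does not link; note that do_div() also divides 'dividend' in
 * place, which is harmless here since the parameter is a by-value copy.
 */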
/**
 * _base_process_reply_queue - Process reply descriptors from reply
 *        descriptor post queue.
 * @reply_q: per IRQ's reply queue object.
 *
 * Return: number of reply descriptors processed from reply
 *        descriptor queue.
 */
static int
_base_process_reply_queue(struct adapter_reply_queue *reply_q)
{
    union reply_descriptor rd;
    u64 completed_cmds;
    u8 request_descript_type;
    u16 smid;
    u8 cb_idx;
    u32 reply;
    u8 msix_index = reply_q->msix_index;
    struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
    Mpi2ReplyDescriptorsUnion_t *rpf;
    u8 rc;

    completed_cmds = 0;
    if (!atomic_add_unless(&reply_q->busy, 1, 1))
        return completed_cmds;

    rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
    request_descript_type = rpf->Default.ReplyFlags
        & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
    if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
        atomic_dec(&reply_q->busy);
        return completed_cmds;
    }

    cb_idx = 0xFF;
    do {
        rd.word = le64_to_cpu(rpf->Words);
        if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
            goto out;
        reply = 0;
        smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
        if (request_descript_type ==
            MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
            request_descript_type ==
            MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
            request_descript_type ==
            MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
            cb_idx = _base_get_cb_idx(ioc, smid);
            if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                (likely(mpt_callbacks[cb_idx] != NULL))) {
                rc = mpt_callbacks[cb_idx](ioc, smid,
                    msix_index, 0);
                if (rc)
                    mpt3sas_base_free_smid(ioc, smid);
            }
        } else if (request_descript_type ==
            MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
            reply = le32_to_cpu(
                rpf->AddressReply.ReplyFrameAddress);
            if (reply > ioc->reply_dma_max_address ||
                reply < ioc->reply_dma_min_address)
                reply = 0;
            if (smid) {
                cb_idx = _base_get_cb_idx(ioc, smid);
                if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                    (likely(mpt_callbacks[cb_idx] != NULL))) {
                    rc = mpt_callbacks[cb_idx](ioc, smid,
                        msix_index, reply);
                    if (reply)
                        _base_display_reply_info(ioc,
                            smid, msix_index, reply);
                    if (rc)
                        mpt3sas_base_free_smid(ioc,
                            smid);
                }
            } else
                _base_async_event(ioc, msix_index, reply);

            /* reply free queue handling */
            if (reply) {
                ioc->reply_free_host_index =
                    (ioc->reply_free_host_index ==
                    (ioc->reply_free_queue_depth - 1)) ?
                    0 : ioc->reply_free_host_index + 1;
                ioc->reply_free[ioc->reply_free_host_index] =
                    cpu_to_le32(reply);
                if (ioc->is_mcpu_endpoint)
                    _base_clone_reply_to_sys_mem(ioc,
                        reply,
                        ioc->reply_free_host_index);
                writel(ioc->reply_free_host_index,
                    &ioc->chip->ReplyFreeHostIndex);
            }
        }

        rpf->Words = cpu_to_le64(ULLONG_MAX);
        reply_q->reply_post_host_index =
            (reply_q->reply_post_host_index ==
            (ioc->reply_post_queue_depth - 1)) ? 0 :
            reply_q->reply_post_host_index + 1;
        request_descript_type =
            reply_q->reply_post_free[reply_q->reply_post_host_index].
            Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
        completed_cmds++;
        /* Update the reply post host index after continuously
         * processing the threshold number of Reply Descriptors.
         * So that FW can find enough entries to post the Reply
         * Descriptors in the reply descriptor post queue.
         */
        if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
            if (ioc->combined_reply_queue) {
                writel(reply_q->reply_post_host_index |
                    ((msix_index & 7) <<
                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                    ioc->replyPostRegisterIndex[msix_index/8]);
            } else {
                writel(reply_q->reply_post_host_index |
                    (msix_index <<
                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                    &ioc->chip->ReplyPostHostIndex);
            }
            if (!reply_q->irq_poll_scheduled) {
                reply_q->irq_poll_scheduled = true;
                irq_poll_sched(&reply_q->irqpoll);
            }
            atomic_dec(&reply_q->busy);
            return completed_cmds;
        }
        if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
            goto out;
        if (!reply_q->reply_post_host_index)
            rpf = reply_q->reply_post_free;
        else
            rpf++;
    } while (1);

 out:

    if (!completed_cmds) {
        atomic_dec(&reply_q->busy);
        return completed_cmds;
    }

    if (ioc->is_warpdrive) {
        writel(reply_q->reply_post_host_index,
            ioc->reply_post_host_index[msix_index]);
        atomic_dec(&reply_q->busy);
        return completed_cmds;
    }

    /* Update Reply Post Host Index.
     * For those HBA's which support combined reply queue feature
     * 1. Get the correct Supplemental Reply Post Host Index Register.
     *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
     *    Index Register address bank i.e. replyPostRegisterIndex[],
     * 2. Then update this register with new reply host index value
     *    in ReplyPostIndex field and the MSIxIndex field with
     *    msix_index value reduced to a value between 0 and 7,
     *    using a modulo 8 operation. Since each Supplemental Reply Post
     *    Host Index Register supports 8 MSI-X vectors.
     *
     * For other HBA's just update the Reply Post Host Index register with
     * new reply host index value in ReplyPostIndex Field and msix_index
     * value in MSIxIndex field.
     */
    if (ioc->combined_reply_queue)
        writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
            MPI2_RPHI_MSIX_INDEX_SHIFT),
            ioc->replyPostRegisterIndex[msix_index/8]);
    else
        writel(reply_q->reply_post_host_index | (msix_index <<
            MPI2_RPHI_MSIX_INDEX_SHIFT),
            &ioc->chip->ReplyPostHostIndex);
    atomic_dec(&reply_q->busy);
    return completed_cmds;
}
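/*
 * Register math example (illustrative) for the combined reply queue path
 * described above: msix_index 11 selects supplemental register
 * replyPostRegisterIndex[11 / 8] = [1], and encodes (11 & 7) = 3 in the
 * MSIxIndex field alongside the new reply_post_host_index, since each
 * supplemental register serves 8 MSI-X vectors.
 */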
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
    struct adapter_reply_queue *reply_q = bus_id;
    struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;

    if (ioc->mask_interrupts)
        return IRQ_NONE;
    if (reply_q->irq_poll_scheduled)
        return IRQ_HANDLED;
    return ((_base_process_reply_queue(reply_q) > 0) ?
        IRQ_HANDLED : IRQ_NONE);
}
/**
 * _base_irqpoll - IRQ poll callback handler
 * @irqpoll: irq_poll object
 * @budget: irq poll weight
 *
 * Return: number of reply descriptors processed
 */
static int
_base_irqpoll(struct irq_poll *irqpoll, int budget)
{
    struct adapter_reply_queue *reply_q;
    int num_entries = 0;

    reply_q = container_of(irqpoll, struct adapter_reply_queue,
        irqpoll);
    if (reply_q->irq_line_enable) {
        disable_irq(reply_q->os_irq);
        reply_q->irq_line_enable = false;
    }
    num_entries = _base_process_reply_queue(reply_q);
    if (num_entries < budget) {
        irq_poll_complete(irqpoll);
        reply_q->irq_poll_scheduled = false;
        reply_q->irq_line_enable = true;
        enable_irq(reply_q->os_irq);
    }

    return num_entries;
}
/**
 * _base_init_irqpolls - initialize IRQ polls
 * @ioc: per adapter object
 *
 * Return: nothing
 */
static void
_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
{
    struct adapter_reply_queue *reply_q, *next;

    if (list_empty(&ioc->reply_queue_list))
        return;

    list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
        irq_poll_init(&reply_q->irqpoll,
            ioc->hba_queue_depth/4, _base_irqpoll);
        reply_q->irq_poll_scheduled = false;
        reply_q->irq_line_enable = true;
        reply_q->os_irq = pci_irq_vector(ioc->pdev,
            reply_q->msix_index);
    }
}
/**
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues?
 * @ioc: per adapter object
 *
 * Return: Whether or not MSI/X is enabled.
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
    return (ioc->facts.IOCCapabilities &
        MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}
/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
    struct adapter_reply_queue *reply_q;

    /* If MSIX capability is turned off
     * then multi-queues are not enabled
     */
    if (!_base_is_controller_msix_enabled(ioc))
        return;

    list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
        if (ioc->shost_recovery || ioc->remove_host ||
            ioc->pci_error_recovery)
            return;
        /* TMs are on msix_index == 0 */
        if (reply_q->msix_index == 0)
            continue;
        if (reply_q->irq_poll_scheduled) {
            /* Calling irq_poll_disable will wait for any pending
             * callbacks to have completed.
             */
            irq_poll_disable(&reply_q->irqpoll);
            irq_poll_enable(&reply_q->irqpoll);
            reply_q->irq_poll_scheduled = false;
            reply_q->irq_line_enable = true;
            enable_irq(reply_q->os_irq);
        }
        synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
    }
}
/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
    mpt_callbacks[cb_idx] = NULL;
}

/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Return: Index of @cb_func.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
    u8 cb_idx;

    for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
        if (mpt_callbacks[cb_idx] == NULL)
            break;

    mpt_callbacks[cb_idx] = cb_func;
    return cb_idx;
}

/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
    u8 cb_idx;

    for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
        mpt3sas_base_release_callback_handler(cb_idx);
}
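/*
 * Usage sketch (illustrative; the names are hypothetical): a sub-module
 * claims a callback slot once at init time and releases it on teardown:
 *
 *     static u8 example_cb_idx;    (hypothetical)
 *
 *     example_cb_idx = mpt3sas_base_register_callback_handler(example_done);
 *     ...
 *     mpt3sas_base_release_callback_handler(example_cb_idx);
 *
 * The returned index is later matched by _base_get_cb_idx() when replies
 * are dispatched in _base_process_reply_queue().
 */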
/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
    u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
        MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
        MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
        MPI2_SGE_FLAGS_SHIFT);

    ioc->base_add_sg_single(paddr, flags_length, -1);
}
/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
    Mpi2SGESimple32_t *sgel = paddr;

    flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
        MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
    sgel->FlagsLength = cpu_to_le32(flags_length);
    sgel->Address = cpu_to_le32(dma_addr);
}

/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
    Mpi2SGESimple64_t *sgel = paddr;

    flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
        MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
    sgel->FlagsLength = cpu_to_le32(flags_length);
    sgel->Address = cpu_to_le64(dma_addr);
}
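/*
 * Encoding example (illustrative): for a 512-byte simple SGE the caller
 * passes flags_length = (flags << MPI2_SGE_FLAGS_SHIFT) | 512; the helpers
 * above then OR in the addressing/system-address flags, so the low 24 bits
 * of FlagsLength carry the transfer length and the top byte carries the
 * SGE flags.
 */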
/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Return: chain tracker from chain_lookup table using key as
 * smid and smid's chain_offset.
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
    struct scsi_cmnd *scmd)
{
    struct chain_tracker *chain_req;
    struct scsiio_tracker *st = scsi_cmd_priv(scmd);
    u16 smid = st->smid;
    u8 chain_offset =
        atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

    if (chain_offset == ioc->chains_needed_per_io)
        return NULL;

    chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
    atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
    return chain_req;
}
/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
    dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
    size_t data_in_sz)
{
    u32 sgl_flags;

    if (!data_out_sz && !data_in_sz) {
        _base_build_zero_len_sge(ioc, psge);
        return;
    }

    if (data_out_sz && data_in_sz) {
        /* WRITE sgel first */
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        ioc->base_add_sg_single(psge, sgl_flags |
            data_out_sz, data_out_dma);

        /* incr sgel */
        psge += ioc->sge_size;

        /* READ sgel last */
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        ioc->base_add_sg_single(psge, sgl_flags |
            data_in_sz, data_in_dma);
    } else if (data_out_sz) /* WRITE */ {
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        ioc->base_add_sg_single(psge, sgl_flags |
            data_out_sz, data_out_dma);
    } else if (data_in_sz) /* READ */ {
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        ioc->base_add_sg_single(psge, sgl_flags |
            data_in_sz, data_in_dma);
    }
}
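/*
 * Usage sketch (illustrative): a bidirectional request lays out a WRITE
 * simple SGE followed by a READ simple SGE:
 *
 *     _base_build_sg(ioc, psge, data_out_dma, data_out_sz,
 *                    data_in_dma, data_in_sz);
 *
 * while passing 0 for both sizes degenerates to the zero-length SGE built
 * by _base_build_zero_len_sge().
 */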
/* IEEE format sgls */

/**
 * _base_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
 * used to describe a larger data buffer. If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a
 * PRP list located elsewhere in memory to describe the remaining data memory
 * segments. The PRP list will be contiguous.
 *
 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as a SGL does. Note
 * however, that this function is only used by the IOCTL call, so the memory
 * given will be guaranteed to be contiguous. There is no need to translate
 * non-contiguous SGL into a PRP in this case. All PRPs will describe
 * contiguous space that is one page size each.
 *
 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
 * a PRP list pointer or a PRP element, depending upon the command. PRP2
 * contains the second PRP element if the memory being described fits within 2
 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
 *
 * A PRP list pointer contains the address of a PRP list, structured as a
 * linear array of PRP entries. Each PRP entry in this list describes a
 * segment of physical memory.
 *
 * Each 64-bit PRP entry comprises an address and an offset field. The address
 * always points at the beginning of a 4KB physical memory page, and the offset
 * describes where within that 4KB page the memory segment begins. Only the
 * first element in a PRP list may contain a non-zero offset, implying that all
 * memory segments following the first begin at the start of a 4KB page.
 *
 * Each PRP element normally describes 4KB of physical memory, with exceptions
 * for the first and last elements in the list. If the memory being described
 * by the list begins at a non-zero offset within the first 4KB page, then the
 * first PRP element will contain a non-zero offset indicating where the region
 * begins within the 4KB page. The last memory segment may end before the end
 * of the 4KB segment, depending upon the overall size of the memory being
 * described by the PRP list.
 *
 * Since PRP entries lack any indication of size, the overall data buffer
 * length is used to determine where the end of the data memory buffer is
 * located, and how many PRP entries are required to describe it.
 *
 * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */
2062 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2063 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2064 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2067 int prp_size = NVME_PRP_SIZE;
2068 __le64 *prp_entry, *prp1_entry, *prp2_entry;
2070 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
2071 u32 offset, entry_len;
2072 u32 page_mask_result, page_mask;
2074 struct mpt3sas_nvme_cmd *nvme_cmd =
2075 (void *)nvme_encap_request->NVMe_Command;
2078 * Not all commands require a data transfer. If no data, just return
2079 * without constructing any PRP.
2081 if (!data_in_sz && !data_out_sz)
2083 prp1_entry = &nvme_cmd->prp1;
2084 prp2_entry = &nvme_cmd->prp2;
2085 prp_entry = prp1_entry;
2087 * For the PRP entries, use the specially allocated buffer of
2088 * contiguous memory.
2090 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2091 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2094 * Check if we are within 1 entry of a page boundary; we don't
2095 * want our first entry to be a PRP List entry.
2097 page_mask = ioc->page_size - 1;
2098 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2099 if (!page_mask_result) {
2100 /* Bump up to next page boundary. */
2101 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2102 prp_page_dma = prp_page_dma + prp_size;
2106 * Set PRP physical pointer, which initially points to the current PRP
2109 prp_entry_dma = prp_page_dma;
2111 /* Get physical address and length of the data buffer. */
2113 dma_addr = data_in_dma;
2114 length = data_in_sz;
2116 dma_addr = data_out_dma;
2117 length = data_out_sz;
2120 /* Loop while the length is not zero. */
2123 * Check if we need to put a list pointer here if we are at
2124 * page boundary - prp_size (8 bytes).
2126 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2127 if (!page_mask_result) {
2129 * This is the last entry in a PRP List, so we need to
2130 * put a PRP list pointer here. What this does is:
2131 * - bump the current memory pointer to the next
2132 * address, which will be the next full page.
2133 * - set the PRP Entry to point to that page. This
2134 * is now the PRP List pointer.
2135 * - bump the PRP Entry pointer to the start of the
2136 * next page. Since all of this PRP memory is
2137 * contiguous, no need to get a new page - it's
2138 * just the next address.
2141 *prp_entry = cpu_to_le64(prp_entry_dma);
2145 /* Handle the case where the entry covers only part of a page. */
2146 offset = dma_addr & page_mask;
2147 entry_len = ioc->page_size - offset;
2149 if (prp_entry == prp1_entry) {
2151 * Must fill in the first PRP pointer (PRP1) before
2154 *prp1_entry = cpu_to_le64(dma_addr);
2157 * Now point to the second PRP entry within the
2160 prp_entry = prp2_entry;
2161 } else if (prp_entry == prp2_entry) {
2163 * Should the PRP2 entry be a PRP List pointer or just
2164 * a regular PRP pointer? If more than one page of
2165 * data remains, a PRP List pointer must be used.
2167 if (length > ioc->page_size) {
2169 * PRP2 will contain a PRP List pointer because
2170 * more PRPs are needed with this command. The
2171 * list will start at the beginning of the
2172 * contiguous buffer.
2174 *prp2_entry = cpu_to_le64(prp_entry_dma);
2177 * The next PRP Entry will be the start of the
2180 prp_entry = prp_page;
2183 * After this, the PRP Entries are complete.
2184 * This command uses 2 PRPs and no PRP list.
2186 *prp2_entry = cpu_to_le64(dma_addr);
2190 * Put entry in list and bump the addresses.
2192 * After PRP1 and PRP2 are filled in, this will fill in
2193 * all remaining PRP entries in a PRP List, one
2194 * each time through the loop.
2196 *prp_entry = cpu_to_le64(dma_addr);
2202 * Bump the phys address of the command's data buffer by the
2205 dma_addr += entry_len;
2207 /* Decrement length accounting for last partial page. */
2208 if (entry_len > length)
2211 length -= entry_len;
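/*
 * Worked example for the PRP math above (illustrative values only, not
 * taken from real hardware): with a 4KB page size, a 10KB buffer whose
 * DMA address is 0x10000800 needs three PRP entries.
 *
 *	offset    = 0x10000800 & 0xFFF = 0x800
 *	entry_len = 0x1000 - 0x800     = 0x800 (first entry covers 2KB)
 *
 *	PRP1    = 0x10000800   (only the first entry may carry an offset)
 *	PRP2    = prp_entry_dma (a PRP list pointer, since more than one
 *	                         page of data remains after PRP1)
 *	list[0] = 0x10001000   (4KB)
 *	list[1] = 0x10002000   (4KB)
 */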
2216 * base_make_prp_nvme -
2217 * Prepare PRPs (Physical Region Page) SGLs, specific to NVMe drives only
2219 * @ioc: per adapter object
2220 * @scmd: SCSI command from the mid-layer
2221 * @mpi_request: mpi request
2223 * @sge_count: scatter gather element count.
2225 * Return: true: PRPs are built
2226 * false: IEEE SGLs need to be built
2229 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2230 struct scsi_cmnd *scmd,
2231 Mpi25SCSIIORequest_t *mpi_request,
2232 u16 smid, int sge_count)
2234 int sge_len, num_prp_in_chain = 0;
2235 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2237 dma_addr_t msg_dma, sge_addr, offset;
2238 u32 page_mask, page_mask_result;
2239 struct scatterlist *sg_scmd;
2241 int data_len = scsi_bufflen(scmd);
2244 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2246 * NVMe has a very convoluted PRP format. One PRP is required
2247 * for each page or partial page. The driver needs to split up OS sg_list
2248 * entries if they are longer than one page or cross a page
2249 * boundary. The driver also has to insert a PRP list pointer entry as
2250 * the last entry in each physical page of the PRP list.
2252 * NOTE: The first PRP "entry" is actually placed in the first
2253 * SGL entry in the main message as IEEE 64 format. The 2nd
2254 * entry in the main message is the chain element, and the rest
2255 * of the PRP entries are built in the contiguous pcie buffer.
2257 page_mask = nvme_pg_size - 1;
2260 * Native SGL is needed.
2261 * Put a chain element in main message frame that points to the first
2264 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
2268 /* Set main message chain element pointer */
2269 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2271 * For NVMe the chain element needs to be the 2nd SG entry in the main
2274 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2275 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2278 * For the PRP entries, use the specially allocated buffer of
2279 * contiguous memory. Normal chain buffers can't be used
2280 * because each chain buffer would need to be the size of an OS
2283 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2284 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2286 main_chain_element->Address = cpu_to_le64(msg_dma);
2287 main_chain_element->NextChainOffset = 0;
2288 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2289 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2290 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2292 /* Build first PRP; the SGE need not be page aligned */
2293 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2294 sg_scmd = scsi_sglist(scmd);
2295 sge_addr = sg_dma_address(sg_scmd);
2296 sge_len = sg_dma_len(sg_scmd);
2298 offset = sge_addr & page_mask;
2299 first_prp_len = nvme_pg_size - offset;
2301 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2302 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2304 data_len -= first_prp_len;
2306 if (sge_len > first_prp_len) {
2307 sge_addr += first_prp_len;
2308 sge_len -= first_prp_len;
2309 } else if (data_len && (sge_len == first_prp_len)) {
2310 sg_scmd = sg_next(sg_scmd);
2311 sge_addr = sg_dma_address(sg_scmd);
2312 sge_len = sg_dma_len(sg_scmd);
2316 offset = sge_addr & page_mask;
2318 /* Insert a PRP list pointer at the page boundary */
2319 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2320 if (unlikely(!page_mask_result)) {
2321 scmd_printk(KERN_NOTICE,
2322 scmd, "page boundary curr_buff: 0x%p\n",
2325 *curr_buff = cpu_to_le64(msg_dma);
2330 *curr_buff = cpu_to_le64(sge_addr);
2335 sge_addr += nvme_pg_size;
2336 sge_len -= nvme_pg_size;
2337 data_len -= nvme_pg_size;
2345 sg_scmd = sg_next(sg_scmd);
2346 sge_addr = sg_dma_address(sg_scmd);
2347 sge_len = sg_dma_len(sg_scmd);
2350 main_chain_element->Length =
2351 cpu_to_le32(num_prp_in_chain * sizeof(u64));
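/*
 * A minimal sketch of the boundary check used above, with made-up
 * numbers: with nvme_pg_size = 4096, page_mask = 0xFFF. When curr_buff
 * is the last u64 slot in its 4KB page, (curr_buff + 1) lands exactly
 * on the next page boundary, so that slot must hold a PRP list pointer
 * to the following page of the contiguous buffer rather than a data
 * address (msg_dma is assumed here to track the DMA address of the
 * slot after curr_buff):
 *
 *	if (!(((uintptr_t)(curr_buff + 1)) & page_mask)) {
 *		*curr_buff = cpu_to_le64(msg_dma);
 *		curr_buff++;
 *		num_prp_in_chain++;
 *	}
 */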
2356 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2357 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2359 u32 data_length = 0;
2360 bool build_prp = true;
2362 data_length = scsi_bufflen(scmd);
2364 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2369 /* If the data length is <= 16K and the number of SGE entries is <= 2
2372 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2379 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2380 * determine if the driver needs to build a native SGL. If so, that native
2381 * SGL is built in the special contiguous buffers allocated especially for
2382 * PCIe SGL creation. If the driver will not build a native SGL, return
2383 * TRUE and a normal IEEE SGL will be built. Currently this routine supports NVMe.
2385 * @ioc: per adapter object
2386 * @mpi_request: mf request pointer
2387 * @smid: system request message index
2388 * @scmd: scsi command
2389 * @pcie_device: points to the PCIe device's info
2391 * Return: 0 if native SGL was built, 1 if no SGL was built
2394 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2395 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2396 struct _pcie_device *pcie_device)
2400 /* Get the SG list pointer and info. */
2401 sges_left = scsi_dma_map(scmd);
2402 if (sges_left < 0) {
2403 sdev_printk(KERN_ERR, scmd->device,
2404 "scsi_dma_map failed: request for %d bytes!\n",
2405 scsi_bufflen(scmd));
2409 /* Check if we need to build a native SG list. */
2410 if (base_is_prp_possible(ioc, pcie_device,
2411 scmd, sges_left) == 0) {
2412 /* We built a native SG list, just return. */
2417 * Build native NVMe PRP.
2419 base_make_prp_nvme(ioc, scmd, mpi_request,
2424 scsi_dma_unmap(scmd);
2429 * _base_add_sg_single_ieee - add sg element for IEEE format
2430 * @paddr: virtual address for SGE
2432 * @chain_offset: number of 128 byte elements from start of segment
2433 * @length: data transfer length
2434 * @dma_addr: Physical address
2437 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2438 dma_addr_t dma_addr)
2440 Mpi25IeeeSgeChain64_t *sgel = paddr;
2442 sgel->Flags = flags;
2443 sgel->NextChainOffset = chain_offset;
2444 sgel->Length = cpu_to_le32(length);
2445 sgel->Address = cpu_to_le64(dma_addr);
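/*
 * Usage sketch for _base_add_sg_single_ieee() (illustrative, with a
 * hypothetical 512-byte buffer at buf_dma): unlike MPI2 SGEs, the IEEE
 * descriptor keeps flags, chain offset, length and address in separate
 * fields, so no flag shifting into the length dword is needed:
 *
 *	u8 flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
 *	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
 *
 *	_base_add_sg_single_ieee(psge, flags, 0, 512, buf_dma);
 */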
2449 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2450 * @ioc: per adapter object
2451 * @paddr: virtual address for SGE
2453 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2454 * something to use if the target device goes brain dead and tries
2455 * to send data even when none is asked for.
2458 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2460 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2461 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2462 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2464 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2468 * _base_build_sg_scmd - main sg creation routine
2469 * pcie_device is unused here!
2470 * @ioc: per adapter object
2471 * @scmd: scsi command
2472 * @smid: system request message index
2473 * @unused: unused pcie_device pointer
2476 * The main routine that builds the scatter gather table from a given
2477 * scsi request sent via the .queuecommand main handler.
2479 * Return: 0 success, anything else error
2482 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2483 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2485 Mpi2SCSIIORequest_t *mpi_request;
2486 dma_addr_t chain_dma;
2487 struct scatterlist *sg_scmd;
2488 void *sg_local, *chain;
2493 u32 sges_in_segment;
2495 u32 sgl_flags_last_element;
2496 u32 sgl_flags_end_buffer;
2497 struct chain_tracker *chain_req;
2499 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2501 /* init scatter gather flags */
2502 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2503 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2504 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2505 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2506 << MPI2_SGE_FLAGS_SHIFT;
2507 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2508 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2509 << MPI2_SGE_FLAGS_SHIFT;
2510 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2512 sg_scmd = scsi_sglist(scmd);
2513 sges_left = scsi_dma_map(scmd);
2514 if (sges_left < 0) {
2515 sdev_printk(KERN_ERR, scmd->device,
2516 "scsi_dma_map failed: request for %d bytes!\n",
2517 scsi_bufflen(scmd));
2521 sg_local = &mpi_request->SGL;
2522 sges_in_segment = ioc->max_sges_in_main_message;
2523 if (sges_left <= sges_in_segment)
2524 goto fill_in_last_segment;
2526 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2527 (sges_in_segment * ioc->sge_size))/4;
2529 /* fill in main message segment when there is a chain following */
2530 while (sges_in_segment) {
2531 if (sges_in_segment == 1)
2532 ioc->base_add_sg_single(sg_local,
2533 sgl_flags_last_element | sg_dma_len(sg_scmd),
2534 sg_dma_address(sg_scmd));
2536 ioc->base_add_sg_single(sg_local, sgl_flags |
2537 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2538 sg_scmd = sg_next(sg_scmd);
2539 sg_local += ioc->sge_size;
2544 /* initializing the chain flags and pointers */
2545 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2546 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2549 chain = chain_req->chain_buffer;
2550 chain_dma = chain_req->chain_buffer_dma;
2552 sges_in_segment = (sges_left <=
2553 ioc->max_sges_in_chain_message) ? sges_left :
2554 ioc->max_sges_in_chain_message;
2555 chain_offset = (sges_left == sges_in_segment) ?
2556 0 : (sges_in_segment * ioc->sge_size)/4;
2557 chain_length = sges_in_segment * ioc->sge_size;
2559 chain_offset = chain_offset <<
2560 MPI2_SGE_CHAIN_OFFSET_SHIFT;
2561 chain_length += ioc->sge_size;
2563 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2564 chain_length, chain_dma);
2567 goto fill_in_last_segment;
2569 /* fill in chain segments */
2570 while (sges_in_segment) {
2571 if (sges_in_segment == 1)
2572 ioc->base_add_sg_single(sg_local,
2573 sgl_flags_last_element |
2574 sg_dma_len(sg_scmd),
2575 sg_dma_address(sg_scmd));
2577 ioc->base_add_sg_single(sg_local, sgl_flags |
2578 sg_dma_len(sg_scmd),
2579 sg_dma_address(sg_scmd));
2580 sg_scmd = sg_next(sg_scmd);
2581 sg_local += ioc->sge_size;
2586 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2589 chain = chain_req->chain_buffer;
2590 chain_dma = chain_req->chain_buffer_dma;
2594 fill_in_last_segment:
2596 /* fill the last segment */
2599 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2600 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2602 ioc->base_add_sg_single(sg_local, sgl_flags |
2603 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2604 sg_scmd = sg_next(sg_scmd);
2605 sg_local += ioc->sge_size;
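/*
 * Worked example for the ChainOffset computation above, using assumed
 * sizes (the real values come from the MPI2 headers): if
 * offsetof(Mpi2SCSIIORequest_t, SGL) were 96 bytes, ioc->sge_size were
 * 16 bytes, and 2 SGEs fit in the main message, the chain element
 * would sit (96 + 2 * 16) / 4 = 32 dwords into the request frame,
 * which is exactly what firmware expects ChainOffset to report.
 */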
2613 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2614 * @ioc: per adapter object
2615 * @scmd: scsi command
2616 * @smid: system request message index
2617 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2618 * constructed when needed.
2621 * The main routine that builds the scatter gather table from a given
2622 * scsi request sent via the .queuecommand main handler.
2624 * Return: 0 success, anything else error
2627 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2628 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2630 Mpi25SCSIIORequest_t *mpi_request;
2631 dma_addr_t chain_dma;
2632 struct scatterlist *sg_scmd;
2633 void *sg_local, *chain;
2637 u32 sges_in_segment;
2638 u8 simple_sgl_flags;
2639 u8 simple_sgl_flags_last;
2641 struct chain_tracker *chain_req;
2643 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2645 /* init scatter gather flags */
2646 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2647 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2648 simple_sgl_flags_last = simple_sgl_flags |
2649 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2650 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2651 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2653 /* Check if we need to build a native SG list. */
2654 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2655 smid, scmd, pcie_device) == 0)) {
2656 /* We built a native SG list, just return. */
2660 sg_scmd = scsi_sglist(scmd);
2661 sges_left = scsi_dma_map(scmd);
2662 if (sges_left < 0) {
2663 sdev_printk(KERN_ERR, scmd->device,
2664 "scsi_dma_map failed: request for %d bytes!\n",
2665 scsi_bufflen(scmd));
2669 sg_local = &mpi_request->SGL;
2670 sges_in_segment = (ioc->request_sz -
2671 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2672 if (sges_left <= sges_in_segment)
2673 goto fill_in_last_segment;
2675 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2676 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2678 /* fill in main message segment when there is a chain following */
2679 while (sges_in_segment > 1) {
2680 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2681 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2682 sg_scmd = sg_next(sg_scmd);
2683 sg_local += ioc->sge_size_ieee;
2688 /* initializing the pointers */
2689 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2692 chain = chain_req->chain_buffer;
2693 chain_dma = chain_req->chain_buffer_dma;
2695 sges_in_segment = (sges_left <=
2696 ioc->max_sges_in_chain_message) ? sges_left :
2697 ioc->max_sges_in_chain_message;
2698 chain_offset = (sges_left == sges_in_segment) ?
2699 0 : sges_in_segment;
2700 chain_length = sges_in_segment * ioc->sge_size_ieee;
2702 chain_length += ioc->sge_size_ieee;
2703 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2704 chain_offset, chain_length, chain_dma);
2708 goto fill_in_last_segment;
2710 /* fill in chain segments */
2711 while (sges_in_segment) {
2712 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2713 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2714 sg_scmd = sg_next(sg_scmd);
2715 sg_local += ioc->sge_size_ieee;
2720 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2723 chain = chain_req->chain_buffer;
2724 chain_dma = chain_req->chain_buffer_dma;
2728 fill_in_last_segment:
2730 /* fill the last segment */
2731 while (sges_left > 0) {
2733 _base_add_sg_single_ieee(sg_local,
2734 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2735 sg_dma_address(sg_scmd));
2737 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2738 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2739 sg_scmd = sg_next(sg_scmd);
2740 sg_local += ioc->sge_size_ieee;
2748 * _base_build_sg_ieee - build generic sg for IEEE format
2749 * @ioc: per adapter object
2750 * @psge: virtual address for SGE
2751 * @data_out_dma: physical address for WRITES
2752 * @data_out_sz: data xfer size for WRITES
2753 * @data_in_dma: physical address for READS
2754 * @data_in_sz: data xfer size for READS
2757 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2758 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2763 if (!data_out_sz && !data_in_sz) {
2764 _base_build_zero_len_sge_ieee(ioc, psge);
2768 if (data_out_sz && data_in_sz) {
2769 /* WRITE sgel first */
2770 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2771 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2772 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2776 psge += ioc->sge_size_ieee;
2778 /* READ sgel last */
2779 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2780 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2782 } else if (data_out_sz) /* WRITE */ {
2783 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2784 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2785 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2786 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2788 } else if (data_in_sz) /* READ */ {
2789 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2790 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2791 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2792 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2797 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
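/*
 * convert_to_kb() turns a page count into kilobytes: each page is
 * 2^PAGE_SHIFT bytes, i.e. 2^(PAGE_SHIFT - 10) kB. For example, with
 * 4KB pages (PAGE_SHIFT == 12) a totalram of 1000 pages becomes
 * 1000 << 2 = 4000 kB.
 */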
2800 * _base_config_dma_addressing - set dma addressing
2801 * @ioc: per adapter object
2802 * @pdev: PCI device struct
2804 * Return: 0 for success, non-zero for failure.
2807 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2812 if (ioc->is_mcpu_endpoint ||
2813 sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
2814 dma_get_required_mask(&pdev->dev) <= 32)
2816 /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2817 else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
2822 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
2823 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
2826 if (dma_mask > 32) {
2827 ioc->base_add_sg_single = &_base_add_sg_single_64;
2828 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2830 ioc->base_add_sg_single = &_base_add_sg_single_32;
2831 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2835 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2836 dma_mask, convert_to_kb(s.totalram));
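/*
 * A condensed sketch of the mask selection above (not driver code; the
 * force_32bit flag stands in for the is_mcpu_endpoint, use_32bit_dma
 * and required-mask checks): SAS3 and newer parts are limited to a
 * 63-bit mask, older parts get the full 64 bits, and a failure to set
 * the mask is reported to the caller:
 *
 *	int dma_mask = force_32bit ? 32 :
 *	    (ioc->hba_mpi_version_belonged > MPI2_VERSION ? 63 : 64);
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_mask)))
 *		return -ENODEV;
 */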
2842 * _base_check_enable_msix - check MSI-X capability
2843 * @ioc: per adapter object
2845 * Check to see if the card is capable of MSI-X, and set the number
2846 * of available msix vectors
2849 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2852 u16 message_control;
2854 /* Check whether the controller is a SAS2008 B0 revision;
2855 * if it is, use IO-APIC instead of MSI-X
2857 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2858 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2862 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2864 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2868 /* get msix vector count */
2869 /* NUMA_IO not supported for older controllers */
2870 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2871 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2872 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2873 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2874 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2875 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2876 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2877 ioc->msix_vector_count = 1;
2879 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2880 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2882 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2883 ioc->msix_vector_count));
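/*
 * The vector count is decoded from the MSI-X Message Control word read
 * just above: the low bits hold "table size - 1", so (for example) a
 * message_control of 0x007F gives (0x7F & 0x3FF) + 1 = 128 vectors,
 * matching the MPT3SAS_GEN35_MAX_MSIX_QUEUES count used later in this
 * file.
 */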
2888 * _base_free_irq - free irq
2889 * @ioc: per adapter object
2891 * Free each reply_queue's IRQ and remove it from the list.
2894 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2896 struct adapter_reply_queue *reply_q, *next;
2898 if (list_empty(&ioc->reply_queue_list))
2901 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2902 list_del(&reply_q->list);
2903 if (ioc->smp_affinity_enable)
2904 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2905 reply_q->msix_index), NULL);
2906 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2913 * _base_request_irq - request irq
2914 * @ioc: per adapter object
2915 * @index: msix index into vector table
2917 * Request the IRQ and insert the new reply_queue into the list.
2920 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2922 struct pci_dev *pdev = ioc->pdev;
2923 struct adapter_reply_queue *reply_q;
2926 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2928 ioc_err(ioc, "unable to allocate memory %zu!\n",
2929 sizeof(struct adapter_reply_queue));
2933 reply_q->msix_index = index;
2935 atomic_set(&reply_q->busy, 0);
2936 if (ioc->msix_enable)
2937 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2938 ioc->driver_name, ioc->id, index);
2940 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2941 ioc->driver_name, ioc->id);
2942 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2943 IRQF_SHARED, reply_q->name, reply_q);
2945 pr_err("%s: unable to allocate interrupt %d!\n",
2946 reply_q->name, pci_irq_vector(pdev, index));
2951 INIT_LIST_HEAD(&reply_q->list);
2952 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2957 * _base_assign_reply_queues - assign an msix index to each cpu
2958 * @ioc: per adapter object
2960 * The end user would need to set the affinity via /proc/irq/#/smp_affinity.
2962 * It would be nice if we could call irq_set_affinity; however, it is not
2963 * an exported symbol.
2966 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2968 unsigned int cpu, nr_cpus, nr_msix, index = 0;
2969 struct adapter_reply_queue *reply_q;
2970 int local_numa_node;
2972 if (!_base_is_controller_msix_enabled(ioc))
2975 if (ioc->msix_load_balance)
2978 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2980 nr_cpus = num_online_cpus();
2981 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2982 ioc->facts.MaxMSIxVectors);
2986 if (ioc->smp_affinity_enable) {
2989 * set irq affinity to local numa node for those irqs
2990 * corresponding to high iops queues.
2992 if (ioc->high_iops_queues) {
2993 local_numa_node = dev_to_node(&ioc->pdev->dev);
2994 for (index = 0; index < ioc->high_iops_queues;
2996 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2997 index), cpumask_of_node(local_numa_node));
3001 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3002 const cpumask_t *mask;
3004 if (reply_q->msix_index < ioc->high_iops_queues)
3007 mask = pci_irq_get_affinity(ioc->pdev,
3008 reply_q->msix_index);
3010 ioc_warn(ioc, "no affinity for msi %x\n",
3011 reply_q->msix_index);
3015 for_each_cpu_and(cpu, mask, cpu_online_mask) {
3016 if (cpu >= ioc->cpu_msix_table_sz)
3018 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3025 cpu = cpumask_first(cpu_online_mask);
3026 nr_msix -= ioc->high_iops_queues;
3029 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3030 unsigned int i, group = nr_cpus / nr_msix;
3032 if (reply_q->msix_index < ioc->high_iops_queues)
3038 if (index < nr_cpus % nr_msix)
3041 for (i = 0 ; i < group ; i++) {
3042 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3043 cpu = cpumask_next(cpu, cpu_online_mask);
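/*
 * Worked example for the fallback distribution above (made-up counts):
 * with nr_cpus = 12 online CPUs and nr_msix = 8 general reply queues,
 * group starts at 12 / 8 = 1 and the first 12 % 8 = 4 queues take one
 * extra CPU each, so queues 0-3 serve two CPUs apiece and queues 4-7
 * serve one, filling all 12 cpu_msix_table[] slots.
 */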
3050 * _base_check_and_enable_high_iops_queues - enable high iops mode
3051 * @ioc: per adapter object
3052 * @hba_msix_vector_count: msix vectors supported by HBA
3054 * Enable high iops queues only if
3055 * - HBA is a SEA/AERO controller, and
3056 * - the HBA supports 128 MSI-X vectors, and
3057 * - the total CPU count in the system is >= 16, and
3058 * - the driver is loaded with the default max_msix_vectors module parameter, and
3059 * - the system is booted in non-kdump mode
3064 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3065 int hba_msix_vector_count)
3069 if (perf_mode == MPT_PERF_MODE_IOPS ||
3070 perf_mode == MPT_PERF_MODE_LATENCY) {
3071 ioc->high_iops_queues = 0;
3075 if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3077 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3078 speed = lnksta & PCI_EXP_LNKSTA_CLS;
3081 ioc->high_iops_queues = 0;
3086 if (!reset_devices && ioc->is_aero_ioc &&
3087 hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3088 num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3089 max_msix_vectors == -1)
3090 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3092 ioc->high_iops_queues = 0;
3096 * _base_disable_msix - disables msix
3097 * @ioc: per adapter object
3101 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3103 if (!ioc->msix_enable)
3105 pci_free_irq_vectors(ioc->pdev);
3106 ioc->msix_enable = 0;
3110 * _base_alloc_irq_vectors - allocate msix vectors
3111 * @ioc: per adapter object
3115 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3117 int i, irq_flags = PCI_IRQ_MSIX;
3118 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3119 struct irq_affinity *descp = &desc;
3121 if (ioc->smp_affinity_enable)
3122 irq_flags |= PCI_IRQ_AFFINITY;
3126 ioc_info(ioc, "high_iops_queues: %d, reply_queue_count: %d\n", ioc->high_iops_queues,
3127 ioc->reply_queue_count);
3129 i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3130 ioc->high_iops_queues,
3131 ioc->reply_queue_count, irq_flags, descp);
3137 * _base_enable_msix - enable msix, falling back to io_apic on failure
3138 * @ioc: per adapter object
3142 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3145 int i, local_max_msix_vectors;
3148 ioc->msix_load_balance = false;
3150 if (msix_disable == -1 || msix_disable == 0)
3156 if (_base_check_enable_msix(ioc) != 0)
3159 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3160 pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3161 ioc->cpu_count, max_msix_vectors);
3162 if (ioc->is_aero_ioc)
3163 _base_check_and_enable_high_iops_queues(ioc,
3164 ioc->msix_vector_count);
3165 ioc->reply_queue_count =
3166 min_t(int, ioc->cpu_count + ioc->high_iops_queues,
3167 ioc->msix_vector_count);
3169 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3170 local_max_msix_vectors = (reset_devices) ? 1 : 8;
3172 local_max_msix_vectors = max_msix_vectors;
3174 if (local_max_msix_vectors > 0)
3175 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3176 ioc->reply_queue_count);
3177 else if (local_max_msix_vectors == 0)
3181 * Enable msix_load_balance only if combined reply queue mode is
3182 * disabled on SAS3 & above generation HBA devices.
3184 if (!ioc->combined_reply_queue &&
3185 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3187 "combined ReplyQueue is off, Enabling msix load balance\n");
3188 ioc->msix_load_balance = true;
3192 * smp affinity setting is not needed when msix load balance
3195 if (ioc->msix_load_balance)
3196 ioc->smp_affinity_enable = 0;
3198 r = _base_alloc_irq_vectors(ioc);
3200 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3204 ioc->msix_enable = 1;
3205 ioc->reply_queue_count = r;
3206 for (i = 0; i < ioc->reply_queue_count; i++) {
3207 r = _base_request_irq(ioc, i);
3209 _base_free_irq(ioc);
3210 _base_disable_msix(ioc);
3215 ioc_info(ioc, "High IOPs queues : %s\n",
3216 ioc->high_iops_queues ? "enabled" : "disabled");
3220 /* fall back to io_apic interrupt routing */
3222 ioc->high_iops_queues = 0;
3223 ioc_info(ioc, "High IOPs queues : disabled\n");
3224 ioc->reply_queue_count = 1;
3225 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3228 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3231 r = _base_request_irq(ioc, 0);
3237 * mpt3sas_base_unmap_resources - free controller resources
3238 * @ioc: per adapter object
3241 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3243 struct pci_dev *pdev = ioc->pdev;
3245 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3247 _base_free_irq(ioc);
3248 _base_disable_msix(ioc);
3250 kfree(ioc->replyPostRegisterIndex);
3251 ioc->replyPostRegisterIndex = NULL;
3254 if (ioc->chip_phys) {
3259 if (pci_is_enabled(pdev)) {
3260 pci_release_selected_regions(ioc->pdev, ioc->bars);
3261 pci_disable_pcie_error_reporting(pdev);
3262 pci_disable_device(pdev);
3267 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3270 * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
3271 * and, if so, issue a diag reset.
3272 * @ioc: per adapter object
3274 * Returns: 0 for success, non-zero for failure.
3277 _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3282 dinitprintk(ioc, pr_info("%s\n", __func__));
3283 if (ioc->pci_error_recovery)
3285 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3286 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3288 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3289 mpt3sas_print_fault_code(ioc, ioc_state &
3290 MPI2_DOORBELL_DATA_MASK);
3291 rc = _base_diag_reset(ioc);
3292 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3293 MPI2_IOC_STATE_COREDUMP) {
3294 mpt3sas_print_coredump_info(ioc, ioc_state &
3295 MPI2_DOORBELL_DATA_MASK);
3296 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3297 rc = _base_diag_reset(ioc);
3304 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3305 * @ioc: per adapter object
3307 * Return: 0 for success, non-zero for failure.
3310 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3312 struct pci_dev *pdev = ioc->pdev;
3317 phys_addr_t chip_phys = 0;
3318 struct adapter_reply_queue *reply_q;
3320 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3322 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3323 if (pci_enable_device_mem(pdev)) {
3324 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3330 if (pci_request_selected_regions(pdev, ioc->bars,
3331 ioc->driver_name)) {
3332 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3338 /* AER (Advanced Error Reporting) hooks */
3339 pci_enable_pcie_error_reporting(pdev);
3341 pci_set_master(pdev);
3344 if (_base_config_dma_addressing(ioc, pdev) != 0) {
3345 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3350 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3351 (!memap_sz || !pio_sz); i++) {
3352 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3355 pio_chip = (u64)pci_resource_start(pdev, i);
3356 pio_sz = pci_resource_len(pdev, i);
3357 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3360 ioc->chip_phys = pci_resource_start(pdev, i);
3361 chip_phys = ioc->chip_phys;
3362 memap_sz = pci_resource_len(pdev, i);
3363 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3367 if (ioc->chip == NULL) {
3369 "unable to map adapter memory or resource not found\n");
3374 _base_mask_interrupts(ioc);
3376 r = _base_get_ioc_facts(ioc);
3378 rc = _base_check_for_fault_and_issue_reset(ioc);
3379 if (rc || (_base_get_ioc_facts(ioc)))
3383 if (!ioc->rdpq_array_enable_assigned) {
3384 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3385 ioc->rdpq_array_enable_assigned = 1;
3388 r = _base_enable_msix(ioc);
3392 if (!ioc->is_driver_loading)
3393 _base_init_irqpolls(ioc);
3394 /* Use the Combined reply queue feature only for SAS3 C0 & higher
3395 * revision HBAs and also only when reply queue count is greater than 8
3397 if (ioc->combined_reply_queue) {
3398 /* Determine the Supplemental Reply Post Host Index Registers
3399 * Addresses. The Supplemental Reply Post Host Index Registers
3400 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and
3401 * each register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
3402 * bytes from the previous one.
3404 ioc->replyPostRegisterIndex = kcalloc(
3405 ioc->combined_reply_index_count,
3406 sizeof(resource_size_t *), GFP_KERNEL);
3407 if (!ioc->replyPostRegisterIndex) {
3409 "allocation for replyPostRegisterIndex failed!\n");
3414 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3415 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3416 ((u8 __force *)&ioc->chip->Doorbell +
3417 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3418 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3422 if (ioc->is_warpdrive) {
3423 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3424 &ioc->chip->ReplyPostHostIndex;
3426 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3427 ioc->reply_post_host_index[i] =
3428 (resource_size_t __iomem *)
3429 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3433 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3434 pr_info("%s: %s enabled: IRQ %d\n",
3436 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3437 pci_irq_vector(ioc->pdev, reply_q->msix_index));
3439 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3440 &chip_phys, ioc->chip, memap_sz);
3441 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3442 (unsigned long long)pio_chip, pio_sz);
3444 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3445 pci_save_state(pdev);
3449 mpt3sas_base_unmap_resources(ioc);
3454 * mpt3sas_base_get_msg_frame - obtain request mf pointer
3455 * @ioc: per adapter object
3456 * @smid: system request message index (smid zero is invalid)
3458 * Return: virt pointer to message frame.
3461 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3463 return (void *)(ioc->request + (smid * ioc->request_sz));
3467 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3468 * @ioc: per adapter object
3469 * @smid: system request message index
3471 * Return: virt pointer to sense buffer.
3474 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3476 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3480 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3481 * @ioc: per adapter object
3482 * @smid: system request message index
3484 * Return: phys pointer to the low 32bit address of the sense buffer.
3487 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3489 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3490 SCSI_SENSE_BUFFERSIZE));
3494 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3495 * @ioc: per adapter object
3496 * @smid: system request message index
3498 * Return: virt pointer to a PCIe SGL.
3501 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3503 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3507 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3508 * @ioc: per adapter object
3509 * @smid: system request message index
3511 * Return: phys pointer to the address of the PCIe buffer.
3514 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3516 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
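/*
 * Usage sketch (hypothetical caller): every per-request resource is
 * indexed by smid, with smid 1 mapping to lookup slot 0 because smid 0
 * is invalid. A caller preparing the NVMe PRP region for a request
 * might do:
 *
 *	void *prp_page = mpt3sas_base_get_pcie_sgl(ioc, smid);
 *	dma_addr_t prp_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
 *
 *	memset(prp_page, 0, ioc->page_size);
 */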
3520 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3521 * @ioc: per adapter object
3522 * @phys_addr: lower 32-bit physical addr of the reply
3524 * Converts the lower 32-bit physical addr into a virt address.
3527 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3531 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3535 * _base_get_msix_index - get the msix index
3536 * @ioc: per adapter object
3537 * @scmd: scsi_cmnd object
3539 * returns msix index of general reply queues,
3540 * i.e. reply queue on which IO request's reply
3541 * should be posted by the HBA firmware.
3544 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3545 struct scsi_cmnd *scmd)
3547 /* Enables reply_queue load balancing */
3548 if (ioc->msix_load_balance)
3549 return ioc->reply_queue_count ?
3550 base_mod64(atomic64_add_return(1,
3551 &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3553 return ioc->cpu_msix_table[raw_smp_processor_id()];
3557 * _base_sdev_nr_inflight_request - get the number of inflight requests
3558 * of a request queue.
3559 * @q: request_queue object
3561 * returns the number of inflight requests of a request queue.
3563 inline unsigned long
3564 _base_sdev_nr_inflight_request(struct request_queue *q)
3566 struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
3568 return atomic_read(&hctx->nr_active);
3573 * _base_get_high_iops_msix_index - get the msix index of
3575 * @ioc: per adapter object
3576 * @scmd: scsi_cmnd object
3578 * Returns: msix index of high iops reply queues.
3579 * i.e. high iops reply queue on which IO request's
3580 * reply should be posted by the HBA firmware.
3583 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3584 struct scsi_cmnd *scmd)
3587 * Round robin the IO interrupts among the high iops
3588 * reply queues in batches of 16 when the number of outstanding
3589 * IOs on the target device is >= 8.
3591 if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
3592 MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3594 atomic64_add_return(1, &ioc->high_iops_outstanding) /
3595 MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3596 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3598 return _base_get_msix_index(ioc, scmd);
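/*
 * Worked example of the batching above, assuming the driver defaults
 * of MPT3SAS_HIGH_IOPS_BATCH_COUNT == 16 and 8 high iops reply queues:
 * the counter divided by 16, taken modulo the queue count, picks the
 * queue, so roughly sixteen consecutive IOs land on one queue before
 * the round robin advances, and the pattern wraps after 128 IOs.
 */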
3602 * mpt3sas_base_get_smid - obtain a free smid from internal queue
3603 * @ioc: per adapter object
3604 * @cb_idx: callback index
3606 * Return: smid (zero is invalid)
3609 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3611 unsigned long flags;
3612 struct request_tracker *request;
3615 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3616 if (list_empty(&ioc->internal_free_list)) {
3617 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3618 ioc_err(ioc, "%s: smid not available\n", __func__);
3622 request = list_entry(ioc->internal_free_list.next,
3623 struct request_tracker, tracker_list);
3624 request->cb_idx = cb_idx;
3625 smid = request->smid;
3626 list_del(&request->tracker_list);
3627 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3632 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3633 * @ioc: per adapter object
3634 * @cb_idx: callback index
3635 * @scmd: pointer to scsi command object
3637 * Return: smid (zero is invalid)
3640 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3641 struct scsi_cmnd *scmd)
3643 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3644 unsigned int tag = scmd->request->tag;
3648 request->cb_idx = cb_idx;
3649 request->smid = smid;
3650 request->scmd = scmd;
3651 INIT_LIST_HEAD(&request->chain_list);
3656 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3657 * @ioc: per adapter object
3658 * @cb_idx: callback index
3660 * Return: smid (zero is invalid)
3663 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3665 unsigned long flags;
3666 struct request_tracker *request;
3669 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3670 if (list_empty(&ioc->hpr_free_list)) {
3671 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3675 request = list_entry(ioc->hpr_free_list.next,
3676 struct request_tracker, tracker_list);
3677 request->cb_idx = cb_idx;
3678 smid = request->smid;
3679 list_del(&request->tracker_list);
3680 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3685 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3688 * See the _wait_for_commands_to_complete() call with regard to this code.
3690 if (ioc->shost_recovery && ioc->pending_io_count) {
3691 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3692 if (ioc->pending_io_count == 0)
3693 wake_up(&ioc->reset_wq);
3697 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3698 struct scsiio_tracker *st)
3700 if (WARN_ON(st->smid == 0))
3705 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3710 * mpt3sas_base_free_smid - put smid back on free_list
3711 * @ioc: per adapter object
3712 * @smid: system request message index
3715 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3717 unsigned long flags;
3720 if (smid < ioc->hi_priority_smid) {
3721 struct scsiio_tracker *st;
3724 st = _get_st_from_smid(ioc, smid);
3726 _base_recovery_check(ioc);
3730 /* Clear MPI request frame */
3731 request = mpt3sas_base_get_msg_frame(ioc, smid);
3732 memset(request, 0, ioc->request_sz);
3734 mpt3sas_base_clear_st(ioc, st);
3735 _base_recovery_check(ioc);
3739 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3740 if (smid < ioc->internal_smid) {
3742 i = smid - ioc->hi_priority_smid;
3743 ioc->hpr_lookup[i].cb_idx = 0xFF;
3744 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3745 } else if (smid <= ioc->hba_queue_depth) {
3746 /* internal queue */
3747 i = smid - ioc->internal_smid;
3748 ioc->internal_lookup[i].cb_idx = 0xFF;
3749 list_add(&ioc->internal_lookup[i].tracker_list,
3750 &ioc->internal_free_list);
3752 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
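/*
 * The range checks above decode the smid space, which is carved into
 * three contiguous regions (boundary values are per-adapter; the
 * layout is shown here only as an illustration):
 *
 *	[1 .. hi_priority_smid)               scsiio trackers
 *	[hi_priority_smid .. internal_smid)   high priority trackers
 *	[internal_smid .. hba_queue_depth]    internal trackers
 */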
3756 * _base_mpi_ep_writeq - 32 bit write to MMIO
3758 * @addr: address in MMIO space
3759 * @writeq_lock: spin lock
3761 * This is special handling for MPI EP to take care of a 32 bit
3762 * environment where it is not guaranteed to send the entire word
3766 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3767 spinlock_t *writeq_lock)
3769 unsigned long flags;
3771 spin_lock_irqsave(writeq_lock, flags);
3772 __raw_writel((u32)(b), addr);
3773 __raw_writel((u32)(b >> 32), (addr + 4));
3774 spin_unlock_irqrestore(writeq_lock, flags);
3778 * _base_writeq - 64 bit write to MMIO
3780 * @addr: address in MMIO space
3781 * @writeq_lock: spin lock
3783 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
3784 * care of a 32 bit environment where it is not guaranteed to send the entire word
3787 #if defined(writeq) && defined(CONFIG_64BIT)
3789 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3792 __raw_writeq(b, addr);
3797 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3799 _base_mpi_ep_writeq(b, addr, writeq_lock);
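/*
 * Rationale sketch: on kernels without a native 64-bit writeq(), the
 * descriptor would otherwise be posted as two independent 32-bit MMIO
 * writes, and two CPUs posting concurrently could interleave their
 * halves so the controller latches a torn descriptor. Taking the lock
 * around the low/high dword pair keeps each 64-bit post atomic:
 *
 *	spin_lock_irqsave(writeq_lock, flags);
 *	__raw_writel(lower_32_bits(b), addr);
 *	__raw_writel(upper_32_bits(b), addr + 4);
 *	spin_unlock_irqrestore(writeq_lock, flags);
 */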
3804 * _base_set_and_get_msix_index - get the msix index and assign to msix_io
3805 * variable of scsi tracker
3806 * @ioc: per adapter object
3807 * @smid: system request message index
3809 * returns msix index.
3812 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3814 struct scsiio_tracker *st = NULL;
3816 if (smid < ioc->hi_priority_smid)
3817 st = _get_st_from_smid(ioc, smid);
3820 return _base_get_msix_index(ioc, NULL);
3822 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3827 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3828 * @ioc: per adapter object
3829 * @smid: system request message index
3830 * @handle: device handle
3833 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
3834 u16 smid, u16 handle)
3836 Mpi2RequestDescriptorUnion_t descriptor;
3837 u64 *request = (u64 *)&descriptor;
3838 void *mpi_req_iomem;
3839 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3841 _clone_sg_entries(ioc, (void *) mfp, smid);
3842 mpi_req_iomem = (void __force *)ioc->chip +
3843 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3844 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3846 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3847 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3848 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3849 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3850 descriptor.SCSIIO.LMID = 0;
3851 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3852 &ioc->scsi_lookup_lock);
3856 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3857 * @ioc: per adapter object
3858 * @smid: system request message index
3859 * @handle: device handle
3862 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3864 Mpi2RequestDescriptorUnion_t descriptor;
3865 u64 *request = (u64 *)&descriptor;
3868 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3869 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3870 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3871 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3872 descriptor.SCSIIO.LMID = 0;
3873 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3874 &ioc->scsi_lookup_lock);
3878 * _base_put_smid_fast_path - send fast path request to firmware
3879 * @ioc: per adapter object
3880 * @smid: system request message index
3881 * @handle: device handle
3884 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3887 Mpi2RequestDescriptorUnion_t descriptor;
3888 u64 *request = (u64 *)&descriptor;
3890 descriptor.SCSIIO.RequestFlags =
3891 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3892 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3893 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3894 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3895 descriptor.SCSIIO.LMID = 0;
3896 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3897 &ioc->scsi_lookup_lock);
3901 * _base_put_smid_hi_priority - send Task Management request to firmware
3902 * @ioc: per adapter object
3903 * @smid: system request message index
3904 * @msix_task: same as the msix of the IO in case of task abort, else 0.
3907 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3910 Mpi2RequestDescriptorUnion_t descriptor;
3911 void *mpi_req_iomem;
3914 if (ioc->is_mcpu_endpoint) {
3915 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3917 /* TBD 256 is offset within sys register. */
3918 mpi_req_iomem = (void __force *)ioc->chip
3919 + MPI_FRAME_START_OFFSET
3920 + (smid * ioc->request_sz);
3921 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3925 request = (u64 *)&descriptor;
3927 descriptor.HighPriority.RequestFlags =
3928 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3929 descriptor.HighPriority.MSIxIndex = msix_task;
3930 descriptor.HighPriority.SMID = cpu_to_le16(smid);
3931 descriptor.HighPriority.LMID = 0;
3932 descriptor.HighPriority.Reserved1 = 0;
3933 if (ioc->is_mcpu_endpoint)
3934 _base_mpi_ep_writeq(*request,
3935 &ioc->chip->RequestDescriptorPostLow,
3936 &ioc->scsi_lookup_lock);
3938 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3939 &ioc->scsi_lookup_lock);
3943 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
3945 * @ioc: per adapter object
3946 * @smid: system request message index
3949 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3951 Mpi2RequestDescriptorUnion_t descriptor;
3952 u64 *request = (u64 *)&descriptor;
3954 descriptor.Default.RequestFlags =
3955 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3956 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3957 descriptor.Default.SMID = cpu_to_le16(smid);
3958 descriptor.Default.LMID = 0;
3959 descriptor.Default.DescriptorTypeDependent = 0;
3960 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3961 &ioc->scsi_lookup_lock);
3965 * _base_put_smid_default - Default, primarily used for config pages
3966 * @ioc: per adapter object
3967 * @smid: system request message index
3970 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3972 Mpi2RequestDescriptorUnion_t descriptor;
3973 void *mpi_req_iomem;
3976 if (ioc->is_mcpu_endpoint) {
3977 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3979 _clone_sg_entries(ioc, (void *) mfp, smid);
3980 /* TBD 256 is offset within sys register */
3981 mpi_req_iomem = (void __force *)ioc->chip +
3982 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3983 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3986 request = (u64 *)&descriptor;
3987 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3988 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3989 descriptor.Default.SMID = cpu_to_le16(smid);
3990 descriptor.Default.LMID = 0;
3991 descriptor.Default.DescriptorTypeDependent = 0;
3992 if (ioc->is_mcpu_endpoint)
3993 _base_mpi_ep_writeq(*request,
3994 &ioc->chip->RequestDescriptorPostLow,
3995 &ioc->scsi_lookup_lock);
3997 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3998 &ioc->scsi_lookup_lock);
4002 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4003 * Atomic Request Descriptor
4004 * @ioc: per adapter object
4005 * @smid: system request message index
4006 * @handle: device handle, unused in this function, for function type match
4011 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4014 Mpi26AtomicRequestDescriptor_t descriptor;
4015 u32 *request = (u32 *)&descriptor;
4017 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4018 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4019 descriptor.SMID = cpu_to_le16(smid);
4021 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4025 * _base_put_smid_fast_path_atomic - send fast path request to firmware
4026 * using Atomic Request Descriptor
4027 * @ioc: per adapter object
4028 * @smid: system request message index
4029 * @handle: device handle, unused in this function, for function type match
4033 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4036 Mpi26AtomicRequestDescriptor_t descriptor;
4037 u32 *request = (u32 *)&descriptor;
4039 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4040 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4041 descriptor.SMID = cpu_to_le16(smid);
4043 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4047 * _base_put_smid_hi_priority_atomic - send Task Management request to
4048 * firmware using Atomic Request Descriptor
4049 * @ioc: per adapter object
4050 * @smid: system request message index
4051 * @msix_task: same as the msix of the IO in case of task abort, else 0
4056 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4059 Mpi26AtomicRequestDescriptor_t descriptor;
4060 u32 *request = (u32 *)&descriptor;
4062 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4063 descriptor.MSIxIndex = msix_task;
4064 descriptor.SMID = cpu_to_le16(smid);
4066 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4070 * _base_put_smid_default_atomic - Default, primarily used for config pages,
4071 * using the Atomic Request Descriptor
4072 * @ioc: per adapter object
4073 * @smid: system request message index
4078 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4080 Mpi26AtomicRequestDescriptor_t descriptor;
4081 u32 *request = (u32 *)&descriptor;
4083 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4084 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4085 descriptor.SMID = cpu_to_le16(smid);
4087 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4091 * _base_display_OEMs_branding - Display branding string
4092 * @ioc: per adapter object
4095 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4097 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL &&
	    ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL &&
	    ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_CISCO &&
	    ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
4100 switch (ioc->pdev->subsystem_vendor) {
4101 case PCI_VENDOR_ID_INTEL:
4102 switch (ioc->pdev->device) {
4103 case MPI2_MFGPAGE_DEVID_SAS2008:
4104 switch (ioc->pdev->subsystem_device) {
4105 case MPT2SAS_INTEL_RMS2LL080_SSDID:
4106 ioc_info(ioc, "%s\n",
4107 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4109 case MPT2SAS_INTEL_RMS2LL040_SSDID:
4110 ioc_info(ioc, "%s\n",
4111 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4113 case MPT2SAS_INTEL_SSD910_SSDID:
4114 ioc_info(ioc, "%s\n",
4115 MPT2SAS_INTEL_SSD910_BRANDING);
4118 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4119 ioc->pdev->subsystem_device);
4123 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4124 switch (ioc->pdev->subsystem_device) {
4125 case MPT2SAS_INTEL_RS25GB008_SSDID:
4126 ioc_info(ioc, "%s\n",
4127 MPT2SAS_INTEL_RS25GB008_BRANDING);
4129 case MPT2SAS_INTEL_RMS25JB080_SSDID:
4130 ioc_info(ioc, "%s\n",
4131 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4133 case MPT2SAS_INTEL_RMS25JB040_SSDID:
4134 ioc_info(ioc, "%s\n",
4135 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4137 case MPT2SAS_INTEL_RMS25KB080_SSDID:
4138 ioc_info(ioc, "%s\n",
4139 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4141 case MPT2SAS_INTEL_RMS25KB040_SSDID:
4142 ioc_info(ioc, "%s\n",
4143 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4145 case MPT2SAS_INTEL_RMS25LB040_SSDID:
4146 ioc_info(ioc, "%s\n",
4147 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4149 case MPT2SAS_INTEL_RMS25LB080_SSDID:
4150 ioc_info(ioc, "%s\n",
4151 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4154 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4155 ioc->pdev->subsystem_device);
4159 case MPI25_MFGPAGE_DEVID_SAS3008:
4160 switch (ioc->pdev->subsystem_device) {
4161 case MPT3SAS_INTEL_RMS3JC080_SSDID:
4162 ioc_info(ioc, "%s\n",
4163 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4166 case MPT3SAS_INTEL_RS3GC008_SSDID:
4167 ioc_info(ioc, "%s\n",
4168 MPT3SAS_INTEL_RS3GC008_BRANDING);
4170 case MPT3SAS_INTEL_RS3FC044_SSDID:
4171 ioc_info(ioc, "%s\n",
4172 MPT3SAS_INTEL_RS3FC044_BRANDING);
4174 case MPT3SAS_INTEL_RS3UC080_SSDID:
4175 ioc_info(ioc, "%s\n",
4176 MPT3SAS_INTEL_RS3UC080_BRANDING);
4179 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4180 ioc->pdev->subsystem_device);
4185 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4186 ioc->pdev->subsystem_device);
4190 case PCI_VENDOR_ID_DELL:
4191 switch (ioc->pdev->device) {
4192 case MPI2_MFGPAGE_DEVID_SAS2008:
4193 switch (ioc->pdev->subsystem_device) {
4194 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4195 ioc_info(ioc, "%s\n",
4196 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4197 break;
4198 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4199 ioc_info(ioc, "%s\n",
4200 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4201 break;
4202 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4203 ioc_info(ioc, "%s\n",
4204 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4205 break;
4206 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4207 ioc_info(ioc, "%s\n",
4208 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4209 break;
4210 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4211 ioc_info(ioc, "%s\n",
4212 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4213 break;
4214 case MPT2SAS_DELL_PERC_H200_SSDID:
4215 ioc_info(ioc, "%s\n",
4216 MPT2SAS_DELL_PERC_H200_BRANDING);
4217 break;
4218 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4219 ioc_info(ioc, "%s\n",
4220 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4221 break;
4222 default:
4223 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4224 ioc->pdev->subsystem_device);
4225 break;
4226 }
4227 break;
4228 case MPI25_MFGPAGE_DEVID_SAS3008:
4229 switch (ioc->pdev->subsystem_device) {
4230 case MPT3SAS_DELL_12G_HBA_SSDID:
4231 ioc_info(ioc, "%s\n",
4232 MPT3SAS_DELL_12G_HBA_BRANDING);
4233 break;
4234 default:
4235 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4236 ioc->pdev->subsystem_device);
4237 break;
4238 }
4239 break;
4240 default:
4241 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4242 ioc->pdev->subsystem_device);
4243 break;
4244 }
4245 break;
4246 case PCI_VENDOR_ID_CISCO:
4247 switch (ioc->pdev->device) {
4248 case MPI25_MFGPAGE_DEVID_SAS3008:
4249 switch (ioc->pdev->subsystem_device) {
4250 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4251 ioc_info(ioc, "%s\n",
4252 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4253 break;
4254 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4255 ioc_info(ioc, "%s\n",
4256 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4257 break;
4258 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4259 ioc_info(ioc, "%s\n",
4260 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4261 break;
4262 default:
4263 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4264 ioc->pdev->subsystem_device);
4265 break;
4266 }
4267 break;
4268 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4269 switch (ioc->pdev->subsystem_device) {
4270 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4271 ioc_info(ioc, "%s\n",
4272 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4273 break;
4274 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4275 ioc_info(ioc, "%s\n",
4276 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4277 break;
4278 default:
4279 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4280 ioc->pdev->subsystem_device);
4281 break;
4282 }
4283 break;
4284 default:
4285 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4286 ioc->pdev->subsystem_device);
4287 break;
4288 }
4289 break;
4290 case MPT2SAS_HP_3PAR_SSVID:
4291 switch (ioc->pdev->device) {
4292 case MPI2_MFGPAGE_DEVID_SAS2004:
4293 switch (ioc->pdev->subsystem_device) {
4294 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4295 ioc_info(ioc, "%s\n",
4296 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4297 break;
4298 default:
4299 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4300 ioc->pdev->subsystem_device);
4301 break;
4302 }
4303 break;
4304 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4305 switch (ioc->pdev->subsystem_device) {
4306 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4307 ioc_info(ioc, "%s\n",
4308 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4309 break;
4310 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4311 ioc_info(ioc, "%s\n",
4312 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4313 break;
4314 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4315 ioc_info(ioc, "%s\n",
4316 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4317 break;
4318 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4319 ioc_info(ioc, "%s\n",
4320 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4321 break;
4322 default:
4323 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4324 ioc->pdev->subsystem_device);
4325 break;
4326 }
4327 break;
4328 default:
4329 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4330 ioc->pdev->subsystem_device);
4331 break;
4332 }
4333 break;
4339 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4340 * version from FW Image Header.
4341 * @ioc: per adapter object
4343 * Return: 0 for success, non-zero for failure.
4346 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4348 Mpi2FWImageHeader_t *fw_img_hdr;
4349 Mpi26ComponentImageHeader_t *cmp_img_hdr;
4350 Mpi25FWUploadRequest_t *mpi_request;
4351 Mpi2FWUploadReply_t mpi_reply;
4353 u32 package_version = 0;
4354 void *fwpkg_data = NULL;
4355 dma_addr_t fwpkg_data_dma;
4356 u16 smid, ioc_status;
4359 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4361 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4362 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4366 data_length = sizeof(Mpi2FWImageHeader_t);
4367 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4368 &fwpkg_data_dma, GFP_KERNEL);
4369 if (!fwpkg_data) {
4370 ioc_err(ioc,
4371 "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4372 __FILE__, __LINE__, __func__);
4373 return -ENOMEM;
4374 }
4376 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4377 if (!smid) {
4378 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4383 ioc->base_cmds.status = MPT3_CMD_PENDING;
4384 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4385 ioc->base_cmds.smid = smid;
4386 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4387 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4388 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4389 mpi_request->ImageSize = cpu_to_le32(data_length);
4390 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4391 data_length);
4392 init_completion(&ioc->base_cmds.done);
4393 ioc->put_smid_default(ioc, smid);
4394 /* Wait for 15 seconds */
4395 wait_for_completion_timeout(&ioc->base_cmds.done,
4396 FW_IMG_HDR_READ_TIMEOUT*HZ);
4397 ioc_info(ioc, "%s: complete\n", __func__);
4398 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4399 ioc_err(ioc, "%s: timeout\n", __func__);
4400 _debug_dump_mf(mpi_request,
4401 sizeof(Mpi25FWUploadRequest_t)/4);
4404 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4405 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4406 memcpy(&mpi_reply, ioc->base_cmds.reply,
4407 sizeof(Mpi2FWUploadReply_t));
4408 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4409 MPI2_IOCSTATUS_MASK;
4410 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4411 fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4412 if (le32_to_cpu(fw_img_hdr->Signature) ==
4413 MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4414 cmp_img_hdr =
4415 (Mpi26ComponentImageHeader_t *)
4416 (fwpkg_data);
4417 package_version =
4418 le32_to_cpu(
4419 cmp_img_hdr->ApplicationSpecific);
4420 } else
4421 package_version =
4422 le32_to_cpu(
4423 fw_img_hdr->PackageVersion.Word);
4424 if (package_version)
4425 ioc_info(ioc,
4426 "FW Package Ver(%02d.%02d.%02d.%02d)\n",
4427 ((package_version) & 0xFF000000) >> 24,
4428 ((package_version) & 0x00FF0000) >> 16,
4429 ((package_version) & 0x0000FF00) >> 8,
4430 (package_version) & 0x000000FF);
4432 _debug_dump_mf(&mpi_reply,
4433 sizeof(Mpi2FWUploadReply_t)/4);
4437 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4440 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
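/*
 * Editorial sketch (not driver code): the 32-bit PackageVersion word read
 * above packs four byte-wide fields, printed most significant byte first.
 * A minimal user-space demonstration of the unpacking; the field meanings
 * (major/minor/unit/dev) are an assumption, not confirmed by the source.
 */
#include <stdint.h>
#include <stdio.h>

static void print_fw_package_version(uint32_t package_version)
{
	printf("FW Package Ver(%02u.%02u.%02u.%02u)\n",
	       (package_version & 0xFF000000) >> 24,	/* e.g. major */
	       (package_version & 0x00FF0000) >> 16,	/* e.g. minor */
	       (package_version & 0x0000FF00) >> 8,
	       package_version & 0x000000FF);
}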
4446 * _base_display_ioc_capabilities - Display the IOC's capabilities.
4447 * @ioc: per adapter object
4450 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4454 u32 iounit_pg1_flags;
4457 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4458 strncpy(desc, ioc->manu_pg0.ChipName, 16);
4459 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4461 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4462 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4463 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4464 ioc->facts.FWVersion.Word & 0x000000FF,
4465 ioc->pdev->revision,
4466 (bios_version & 0xFF000000) >> 24,
4467 (bios_version & 0x00FF0000) >> 16,
4468 (bios_version & 0x0000FF00) >> 8,
4469 bios_version & 0x000000FF);
4471 _base_display_OEMs_branding(ioc);
4473 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4474 pr_info("%sNVMe", i ? "," : "");
4475 i++;
4476 }
4478 ioc_info(ioc, "Protocol=(");
4480 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4481 pr_cont("Initiator");
4485 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4486 pr_cont("%sTarget", i ? "," : "");
4491 pr_cont("), Capabilities=(");
4493 if (!ioc->hide_ir_msg) {
4494 if (ioc->facts.IOCCapabilities &
4495 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4501 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4502 pr_cont("%sTLR", i ? "," : "");
4506 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4507 pr_cont("%sMulticast", i ? "," : "");
4511 if (ioc->facts.IOCCapabilities &
4512 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4513 pr_cont("%sBIDI Target", i ? "," : "");
4517 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4518 pr_cont("%sEEDP", i ? "," : "");
4522 if (ioc->facts.IOCCapabilities &
4523 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4524 pr_cont("%sSnapshot Buffer", i ? "," : "");
4528 if (ioc->facts.IOCCapabilities &
4529 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4530 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4534 if (ioc->facts.IOCCapabilities &
4535 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4536 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4540 if (ioc->facts.IOCCapabilities &
4541 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4542 pr_cont("%sTask Set Full", i ? "," : "");
4546 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4547 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4548 pr_cont("%sNCQ", i ? "," : "");
4556 * mpt3sas_base_update_missing_delay - change the missing delay timers
4557 * @ioc: per adapter object
4558 * @device_missing_delay: amount of time until a device is reported missing
4559 * @io_missing_delay: interval after which IO is returned when a device is missing
4561 * Passed on the command line, this function modifies both the device missing
4562 * delay and the io missing delay. It should be called at driver
4563 * load time.
4566 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4567 u16 device_missing_delay, u8 io_missing_delay)
4569 u16 dmd, dmd_new, dmd_original;
4570 u8 io_missing_delay_original;
4572 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4573 Mpi2ConfigReply_t mpi_reply;
4577 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4581 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4582 sizeof(Mpi2SasIOUnit1PhyData_t));
4583 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4584 if (!sas_iounit_pg1) {
4585 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4586 __FILE__, __LINE__, __func__);
4589 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4590 sas_iounit_pg1, sz))) {
4591 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4592 __FILE__, __LINE__, __func__);
4595 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4596 MPI2_IOCSTATUS_MASK;
4597 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4598 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4599 __FILE__, __LINE__, __func__);
4603 /* device missing delay */
4604 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4605 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4606 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4607 else
4608 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4609 dmd_original = dmd;
4610 if (device_missing_delay > 0x7F) {
4611 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4612 device_missing_delay;
4613 dmd = dmd / 16;
4614 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4615 } else
4616 dmd = device_missing_delay;
4617 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4619 /* io missing delay */
4620 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4621 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4623 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4624 sz)) {
4625 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4626 dmd_new = (dmd &
4627 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4628 else
4629 dmd_new =
4630 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4631 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4632 dmd_original, dmd_new);
4633 ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4634 io_missing_delay_original,
4635 io_missing_delay);
4636 ioc->device_missing_delay = dmd_new;
4637 ioc->io_missing_delay = io_missing_delay;
4638 }
4641 kfree(sas_iounit_pg1);
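/*
 * Editorial sketch (not driver code): the ReportDeviceMissingDelay encoding
 * used above. Delays up to 0x7F seconds are stored directly; larger delays
 * are stored in units of 16 seconds with the UNIT_16 flag set, capped at
 * 0x7F0 (2032) seconds. The flag value mirrors my understanding of
 * MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; treat it as an assumption.
 */
#include <stdint.h>

#define UNIT_16_FLAG 0x80	/* assumed MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 */

static uint8_t encode_device_missing_delay(uint16_t seconds)
{
	if (seconds > 0x7F) {
		if (seconds > 0x7F0)
			seconds = 0x7F0;	/* hardware cap */
		return (uint8_t)((seconds / 16) | UNIT_16_FLAG);
	}
	return (uint8_t)seconds;	/* stored directly, 1-second units */
}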
4645 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4646 * according to performance mode.
4647 * @ioc: per adapter object
4652 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4654 Mpi2IOCPage1_t ioc_pg1;
4655 Mpi2ConfigReply_t mpi_reply;
4657 mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4658 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4660 switch (perf_mode) {
4661 case MPT_PERF_MODE_DEFAULT:
4662 case MPT_PERF_MODE_BALANCED:
4663 if (ioc->high_iops_queues) {
4664 ioc_info(ioc,
4665 "Enable interrupt coalescing only for first "
4666 "%d reply queues\n",
4667 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4668 /*
4669 * If bit 31 is zero, interrupt coalescing is
4670 * enabled for all reply descriptor post queues.
4671 * If bit 31 is set to one, the user can
4672 * enable/disable interrupt coalescing on a per reply
4673 * descriptor post queue group (of 8) basis. So to enable
4674 * interrupt coalescing only on the first reply descriptor
4675 * post queue group, bit 31 and the zeroth bit are set.
4676 */
4677 ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4678 ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4679 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4680 ioc_info(ioc, "performance mode: balanced\n");
4684 case MPT_PERF_MODE_LATENCY:
4686 * Enable interrupt coalescing on all reply queues
4687 * with timeout value 0xA
4689 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4690 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4691 ioc_pg1.ProductSpecific = 0;
4692 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4693 ioc_info(ioc, "performance mode: latency\n");
4695 case MPT_PERF_MODE_IOPS:
4697 * Enable interrupt coalescing on all reply queues.
4700 "performance mode: iops with coalescing timeout: 0x%x\n",
4701 le32_to_cpu(ioc_pg1.CoalescingTimeout));
4702 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4703 ioc_pg1.ProductSpecific = 0;
4704 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
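/*
 * Editorial sketch (not driver code): the ProductSpecific value that
 * balanced mode programs above, in isolation. Bit 31 selects per-group
 * control (one bit per group of 8 reply queues); the low bits then enable
 * coalescing on the first high-IOPS groups only. With the driver's
 * MPT3SAS_HIGH_IOPS_REPLY_QUEUES of 8 this evaluates to 0x80000001.
 */
#include <stdint.h>

static uint32_t coalescing_product_specific(unsigned int high_iops_queues)
{
	uint32_t first_group_bits = (1u << (high_iops_queues / 8)) - 1;

	return 0x80000000u | first_group_bits;	/* bit 31 + group enables */
}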
4710 * _base_static_config_pages - static start of day config pages
4711 * @ioc: per adapter object
4714 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4716 Mpi2ConfigReply_t mpi_reply;
4717 u32 iounit_pg1_flags;
4719 ioc->nvme_abort_timeout = 30;
4720 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4721 if (ioc->ir_firmware)
4722 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4726 * Ensure correct T10 PI operation if vendor left EEDPTagMode
4727 * flag unset in NVDATA.
4729 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4730 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
4731 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4733 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4734 ioc->manu_pg11.EEDPTagMode |= 0x1;
4735 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4738 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4739 ioc->tm_custom_handling = 1;
4741 ioc->tm_custom_handling = 0;
4742 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4743 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4744 else if (ioc->manu_pg11.NVMeAbortTO >
4745 NVME_TASK_ABORT_MAX_TIMEOUT)
4746 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4748 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4751 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4752 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4753 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4754 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4755 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4756 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4757 _base_display_ioc_capabilities(ioc);
4760 * Enable task_set_full handling in iounit_pg1 when the
4761 * facts capabilities indicate that its supported.
4763 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4764 if ((ioc->facts.IOCCapabilities &
4765 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4767 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4770 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4771 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4772 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4774 if (ioc->iounit_pg8.NumSensors)
4775 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4776 if (ioc->is_aero_ioc)
4777 _base_update_ioc_page1_inlinewith_perf_mode(ioc);
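/*
 * Editorial sketch (not driver code): the EEDPTagMode override above in
 * isolation. It clears the two-bit tag mode field and then selects mode 1,
 * which is what the driver writes back when the NVDATA value was left at 0.
 */
#include <stdint.h>

static uint8_t fix_eedp_tag_mode(uint8_t eedp_tag_mode)
{
	eedp_tag_mode &= ~0x3;	/* clear the tag mode bits */
	eedp_tag_mode |= 0x1;	/* select tag mode 1 */
	return eedp_tag_mode;
}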
4781 * mpt3sas_free_enclosure_list - release memory
4782 * @ioc: per adapter object
4784 * Free memory allocated during enclosure add.
4787 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4789 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4791 /* Free enclosure list */
4792 list_for_each_entry_safe(enclosure_dev,
4793 enclosure_dev_next, &ioc->enclosure_list, list) {
4794 list_del(&enclosure_dev->list);
4795 kfree(enclosure_dev);
4800 * _base_release_memory_pools - release memory
4801 * @ioc: per adapter object
4803 * Free memory allocated from _base_allocate_memory_pools.
4806 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4810 int dma_alloc_count = 0;
4811 struct chain_tracker *ct;
4812 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4814 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4817 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
4818 ioc->request, ioc->request_dma);
4820 ioc_info(ioc, "request_pool(0x%p): free\n",
4822 ioc->request = NULL;
4826 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4827 dma_pool_destroy(ioc->sense_dma_pool);
4829 ioc_info(ioc, "sense_pool(0x%p): free\n",
4835 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4836 dma_pool_destroy(ioc->reply_dma_pool);
4838 ioc_info(ioc, "reply_pool(0x%p): free\n",
4843 if (ioc->reply_free) {
4844 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4845 ioc->reply_free_dma);
4846 dma_pool_destroy(ioc->reply_free_dma_pool);
4848 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4850 ioc->reply_free = NULL;
4853 if (ioc->reply_post) {
4854 dma_alloc_count = DIV_ROUND_UP(count,
4855 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4856 for (i = 0; i < count; i++) {
4857 if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
4858 && dma_alloc_count) {
4859 if (ioc->reply_post[i].reply_post_free) {
4861 ioc->reply_post_free_dma_pool,
4862 ioc->reply_post[i].reply_post_free,
4863 ioc->reply_post[i].reply_post_free_dma);
4864 dexitprintk(ioc, ioc_info(ioc,
4865 "reply_post_free_pool(0x%p): free\n",
4866 ioc->reply_post[i].reply_post_free));
4867 ioc->reply_post[i].reply_post_free =
4873 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4874 if (ioc->reply_post_free_array &&
4875 ioc->rdpq_array_enable) {
4876 dma_pool_free(ioc->reply_post_free_array_dma_pool,
4877 ioc->reply_post_free_array,
4878 ioc->reply_post_free_array_dma);
4879 ioc->reply_post_free_array = NULL;
4881 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4882 kfree(ioc->reply_post);
4885 if (ioc->pcie_sgl_dma_pool) {
4886 for (i = 0; i < ioc->scsiio_depth; i++) {
4887 dma_pool_free(ioc->pcie_sgl_dma_pool,
4888 ioc->pcie_sg_lookup[i].pcie_sgl,
4889 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4891 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4894 if (ioc->config_page) {
4896 ioc_info(ioc, "config_page(0x%p): free\n",
4898 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
4899 ioc->config_page, ioc->config_page_dma);
4902 kfree(ioc->hpr_lookup);
4903 ioc->hpr_lookup = NULL;
4904 kfree(ioc->internal_lookup);
4905 ioc->internal_lookup = NULL;
4906 if (ioc->chain_lookup) {
4907 for (i = 0; i < ioc->scsiio_depth; i++) {
4908 for (j = ioc->chains_per_prp_buffer;
4909 j < ioc->chains_needed_per_io; j++) {
4910 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4911 if (ct && ct->chain_buffer)
4912 dma_pool_free(ioc->chain_dma_pool,
4914 ct->chain_buffer_dma);
4916 kfree(ioc->chain_lookup[i].chains_per_smid);
4918 dma_pool_destroy(ioc->chain_dma_pool);
4919 kfree(ioc->chain_lookup);
4920 ioc->chain_lookup = NULL;
4925 * mpt3sas_check_same_4gb_region - check whether all reply queues in a set
4926 * have the same upper 32 bits in their base memory address.
4927 * @reply_pool_start_address: Base address of a reply queue set
4928 * @pool_sz: Size of a single Reply Descriptor Post Queue pool
4930 * Return: 1 if the reply queues in the set share the same upper 32 bits in
4931 * their base memory address, else 0.
4935 mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
4937 long reply_pool_end_address;
4939 reply_pool_end_address = reply_pool_start_address + pool_sz;
4941 if (upper_32_bits(reply_pool_start_address) ==
4942 upper_32_bits(reply_pool_end_address))
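/*
 * Editorial sketch (not driver code): the check above is equivalent to
 * asking whether the pool crosses a 4GB boundary. Example: a 1 MB pool
 * starting at 0xFFFF0000 extends past 0x100000000, so the upper 32 bits
 * of its start and end addresses differ and it would be rejected.
 */
#include <stdint.h>

static int same_4gb_region(uint64_t start, uint32_t pool_sz)
{
	return (start >> 32) == ((start + pool_sz) >> 32);
}
/* same_4gb_region(0xFFFF0000ULL, 1 << 20) == 0: crosses a 4GB boundary */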
4949 * base_alloc_rdpq_dma_pool - allocate DMA-able memory for the reply queues.
4951 * @ioc: per adapter object
4952 * @sz: DMA Pool size
4953 * Return: 0 for success, non-zero for failure.
4956 base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
4959 u32 dma_alloc_count = 0;
4960 int reply_post_free_sz = ioc->reply_post_queue_depth *
4961 sizeof(Mpi2DefaultReplyDescriptor_t);
4962 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4964 ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
4966 if (!ioc->reply_post)
4969 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and for
4970 * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..) must stay
4971 * within a 4GB boundary, i.e. reply queues in a set must have the same
4972 * upper 32 bits in their memory address. So here the driver allocates the
4973 * DMA-able memory for the reply queues accordingly.
4974 * The driver applies the VENTURA_SERIES
4975 * limitation to INVADER_SERIES as well.
4977 dma_alloc_count = DIV_ROUND_UP(count,
4978 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4979 ioc->reply_post_free_dma_pool =
4980 dma_pool_create("reply_post_free pool",
4981 &ioc->pdev->dev, sz, 16, 0);
4982 if (!ioc->reply_post_free_dma_pool)
4984 for (i = 0; i < count; i++) {
4985 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
4986 ioc->reply_post[i].reply_post_free =
4987 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
4989 &ioc->reply_post[i].reply_post_free_dma);
4990 if (!ioc->reply_post[i].reply_post_free)
4993 * Each set of the RDPQ pool must satisfy the 4GB boundary
4994 * restriction:
4995 * 1) Check if the allocated resources for the RDPQ pool are in
4996 * the same 4GB range.
4997 * 2) If #1 is true, continue with 64-bit DMA.
4998 * 3) If #1 is false, return 1, which means free all the
4999 * resources, set the DMA mask to 32 bits, and reallocate.
5001 if (!mpt3sas_check_same_4gb_region(
5002 (long)ioc->reply_post[i].reply_post_free, sz)) {
5004 ioc_err(ioc, "bad Replypost free pool(0x%p)"
5005 "reply_post_free_dma = (0x%llx)\n",
5006 ioc->reply_post[i].reply_post_free,
5007 (unsigned long long)
5008 ioc->reply_post[i].reply_post_free_dma));
5014 ioc->reply_post[i].reply_post_free =
5015 (Mpi2ReplyDescriptorsUnion_t *)
5016 ((long)ioc->reply_post[i-1].reply_post_free
5017 + reply_post_free_sz);
5018 ioc->reply_post[i].reply_post_free_dma =
5020 (ioc->reply_post[i-1].reply_post_free_dma +
5021 reply_post_free_sz);
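/*
 * Editorial sketch (not driver code): how many dma_pool_zalloc() calls the
 * loop above makes. One chunk backs RDPQ_MAX_INDEX_IN_ONE_CHUNK (16 in the
 * driver) consecutive reply queues; the remaining queues within a chunk are
 * carved out of the previous entry by pointer arithmetic. Example: 24 reply
 * queues need DIV_ROUND_UP(24, 16) = 2 chunk allocations.
 */
static unsigned int rdpq_chunks_needed(unsigned int reply_queue_count)
{
	const unsigned int per_chunk = 16;	/* RDPQ_MAX_INDEX_IN_ONE_CHUNK */

	return (reply_queue_count + per_chunk - 1) / per_chunk; /* DIV_ROUND_UP */
}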
5028 * _base_allocate_memory_pools - allocate start of day memory pools
5029 * @ioc: per adapter object
5031 * Return: 0 success, anything else error.
5034 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5036 struct mpt3sas_facts *facts;
5037 u16 max_sge_elements;
5038 u16 chains_needed_per_io;
5039 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
5042 u16 max_request_credit, nvme_blocks_needed;
5043 unsigned short sg_tablesize;
5047 struct chain_tracker *ct;
5049 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5053 facts = &ioc->facts;
5055 /* command line tunables for max sgl entries */
5056 if (max_sgl_entries != -1)
5057 sg_tablesize = max_sgl_entries;
5059 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
5060 sg_tablesize = MPT2SAS_SG_DEPTH;
5062 sg_tablesize = MPT3SAS_SG_DEPTH;
5065 /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
5067 sg_tablesize = min_t(unsigned short, sg_tablesize,
5068 MPT_KDUMP_MIN_PHYS_SEGMENTS);
5070 if (ioc->is_mcpu_endpoint)
5071 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5073 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
5074 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5075 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
5076 sg_tablesize = min_t(unsigned short, sg_tablesize,
5078 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
5079 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
5081 ioc->shost->sg_tablesize = sg_tablesize;
5084 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
5085 (facts->RequestCredit / 4));
5086 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
5087 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
5088 INTERNAL_SCSIIO_CMDS_COUNT)) {
5089 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
5090 facts->RequestCredit);
5093 ioc->internal_depth = 10;
5096 ioc->hi_priority_depth = ioc->internal_depth - (5);
5097 /* command line tunables for max controller queue depth */
5098 if (max_queue_depth != -1 && max_queue_depth != 0) {
5099 max_request_credit = min_t(u16, max_queue_depth +
5100 ioc->internal_depth, facts->RequestCredit);
5101 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
5102 max_request_credit = MAX_HBA_QUEUE_DEPTH;
5103 } else if (reset_devices)
5104 max_request_credit = min_t(u16, facts->RequestCredit,
5105 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
5107 max_request_credit = min_t(u16, facts->RequestCredit,
5108 MAX_HBA_QUEUE_DEPTH);
5110 /* Firmware maintains additional facts->HighPriorityCredit number of
5111 * credits for HiPriority Request messages, so hba queue depth will be
5112 * sum of max_request_credit and high priority queue depth.
5114 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
5116 /* request frame size */
5117 ioc->request_sz = facts->IOCRequestFrameSize * 4;
5119 /* reply frame size */
5120 ioc->reply_sz = facts->ReplyFrameSize * 4;
5122 /* chain segment size */
5123 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5124 if (facts->IOCMaxChainSegmentSize)
5125 ioc->chain_segment_sz =
5126 facts->IOCMaxChainSegmentSize *
5129 /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
5130 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
5133 ioc->chain_segment_sz = ioc->request_sz;
5135 /* calculate the max scatter element size */
5136 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
5140 /* calculate number of sg elements left over in the 1st frame */
5141 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
5142 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
5143 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
5145 /* now do the same for a chain buffer */
5146 max_sge_elements = ioc->chain_segment_sz - sge_size;
5147 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
5150 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
5152 chains_needed_per_io = ((ioc->shost->sg_tablesize -
5153 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
5155 if (chains_needed_per_io > facts->MaxChainDepth) {
5156 chains_needed_per_io = facts->MaxChainDepth;
5157 ioc->shost->sg_tablesize = min_t(u16,
5158 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
5159 * chains_needed_per_io), ioc->shost->sg_tablesize);
5161 ioc->chains_needed_per_io = chains_needed_per_io;
5163 /* reply free queue sizing - taking into account for 64 FW events */
5164 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5166 /* mCPU manage single counters for simplicity */
5167 if (ioc->is_mcpu_endpoint)
5168 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
5170 /* calculate reply descriptor post queue depth */
5171 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
5172 ioc->reply_free_queue_depth + 1;
5173 /* align the reply post queue on the next 16 count boundary */
5174 if (ioc->reply_post_queue_depth % 16)
5175 ioc->reply_post_queue_depth += 16 -
5176 (ioc->reply_post_queue_depth % 16);
5179 if (ioc->reply_post_queue_depth >
5180 facts->MaxReplyDescriptorPostQueueDepth) {
5181 ioc->reply_post_queue_depth =
5182 facts->MaxReplyDescriptorPostQueueDepth -
5183 (facts->MaxReplyDescriptorPostQueueDepth % 16);
5184 ioc->hba_queue_depth =
5185 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
5186 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5190 "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
5191 "sge_per_io(%d), chains_per_io(%d)\n",
5192 ioc->max_sges_in_main_message,
5193 ioc->max_sges_in_chain_message,
5194 ioc->shost->sg_tablesize,
5195 ioc->chains_needed_per_io);
5197 /* reply post queue, 16 byte align */
5198 reply_post_free_sz = ioc->reply_post_queue_depth *
5199 sizeof(Mpi2DefaultReplyDescriptor_t);
5200 rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
5201 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
5202 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
5203 ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
5204 if (ret == -EAGAIN) {
5206 * Free allocated bad RDPQ memory pools.
5207 * Change dma coherent mask to 32 bit and reallocate RDPQ
5209 _base_release_memory_pools(ioc);
5210 ioc->use_32bit_dma = true;
5211 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
5212 ioc_err(ioc,
5213 "32 DMA mask failed %s\n", pci_name(ioc->pdev));
5216 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
5218 } else if (ret == -ENOMEM)
5220 total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
5221 DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
5222 ioc->scsiio_depth = ioc->hba_queue_depth -
5223 ioc->hi_priority_depth - ioc->internal_depth;
5225 /* set the scsi host can_queue depth
5226 * with some internal commands that could be outstanding
5228 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
5230 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
5231 ioc->shost->can_queue));
5233 /* contiguous pool for request and chains, 16 byte align, one extra
5234 * frame for smid=0
5235 */
5236 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
5237 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
5239 /* hi-priority queue */
5240 sz += (ioc->hi_priority_depth * ioc->request_sz);
5242 /* internal queue */
5243 sz += (ioc->internal_depth * ioc->request_sz);
5245 ioc->request_dma_sz = sz;
5246 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
5247 &ioc->request_dma, GFP_KERNEL);
5248 if (!ioc->request) {
5249 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
5250 ioc->hba_queue_depth, ioc->chains_needed_per_io,
5251 ioc->request_sz, sz / 1024);
5252 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
5255 ioc->hba_queue_depth -= retry_sz;
5256 _base_release_memory_pools(ioc);
5257 goto retry_allocation;
5259 memset(ioc->request, 0, sz);
5262 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
5263 ioc->hba_queue_depth, ioc->chains_needed_per_io,
5264 ioc->request_sz, sz / 1024);
5266 /* hi-priority queue */
5267 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
5269 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
5272 /* internal queue */
5273 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
5275 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
5279 "request pool(0x%p) - dma(0x%llx): "
5280 "depth(%d), frame_size(%d), pool_size(%d kB)\n",
5281 ioc->request, (unsigned long long) ioc->request_dma,
5282 ioc->hba_queue_depth, ioc->request_sz,
5283 (ioc->hba_queue_depth * ioc->request_sz) / 1024);
5288 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
5289 ioc->request, ioc->scsiio_depth));
5291 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
5292 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
5293 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
5294 if (!ioc->chain_lookup) {
5295 ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
5299 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
5300 for (i = 0; i < ioc->scsiio_depth; i++) {
5301 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
5302 if (!ioc->chain_lookup[i].chains_per_smid) {
5303 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5308 /* initialize hi-priority queue smid's */
5309 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
5310 sizeof(struct request_tracker), GFP_KERNEL);
5311 if (!ioc->hpr_lookup) {
5312 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
5315 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
5317 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
5319 ioc->hi_priority_depth, ioc->hi_priority_smid));
5321 /* initialize internal queue smid's */
5322 ioc->internal_lookup = kcalloc(ioc->internal_depth,
5323 sizeof(struct request_tracker), GFP_KERNEL);
5324 if (!ioc->internal_lookup) {
5325 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
5328 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
5330 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
5332 ioc->internal_depth, ioc->internal_smid));
5334 * The number of NVMe page sized blocks needed is:
5335 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
5336 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
5337 * that is placed in the main message frame. 8 is the size of each PRP
5338 * entry or PRP list pointer entry. 8 is subtracted from page_size
5339 * because of the PRP list pointer entry at the end of a page, so this
5340 * is not counted as a PRP entry. The 1 added page is a round up.
5342 * To avoid allocation failures due to the amount of memory that could
5343 * be required for NVMe PRP's, only each set of NVMe blocks will be
5344 * contiguous, so a new set is allocated for each possible I/O.
5346 ioc->chains_per_prp_buffer = 0;
5347 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
5348 nvme_blocks_needed =
5349 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
5350 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
5351 nvme_blocks_needed++;
5353 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
5354 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
5355 if (!ioc->pcie_sg_lookup) {
5356 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
5359 sz = nvme_blocks_needed * ioc->page_size;
5360 ioc->pcie_sgl_dma_pool =
5361 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
5362 if (!ioc->pcie_sgl_dma_pool) {
5363 ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5367 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5368 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
5369 ioc->chains_needed_per_io);
5371 for (i = 0; i < ioc->scsiio_depth; i++) {
5372 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
5373 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5374 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5375 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5376 ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5379 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5380 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5382 ioc->pcie_sg_lookup[i].pcie_sgl +
5383 (j * ioc->chain_segment_sz);
5384 ct->chain_buffer_dma =
5385 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5386 (j * ioc->chain_segment_sz);
5391 ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5392 ioc->scsiio_depth, sz,
5393 (sz * ioc->scsiio_depth) / 1024));
5395 ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
5396 ioc->chains_per_prp_buffer));
5397 total_sz += sz * ioc->scsiio_depth;
5400 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5401 ioc->chain_segment_sz, 16, 0);
5402 if (!ioc->chain_dma_pool) {
5403 ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
5406 for (i = 0; i < ioc->scsiio_depth; i++) {
5407 for (j = ioc->chains_per_prp_buffer;
5408 j < ioc->chains_needed_per_io; j++) {
5409 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5410 ct->chain_buffer = dma_pool_alloc(
5411 ioc->chain_dma_pool, GFP_KERNEL,
5412 &ct->chain_buffer_dma);
5413 if (!ct->chain_buffer) {
5414 ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
5418 total_sz += ioc->chain_segment_sz;
5422 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
5423 ioc->chain_depth, ioc->chain_segment_sz,
5424 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
5426 /* sense buffers, 4 byte align */
5427 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
5428 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5430 if (!ioc->sense_dma_pool) {
5431 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5434 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5437 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5440 /* The sense buffer must reside within a single 4GB region;
5441 * the function below checks for this.
5442 * In case of failure, a new DMA pool will be created with updated
5443 * alignment; the older allocation and pool will be destroyed.
5444 * The alignment is chosen such that the next allocation, if
5445 * successful, will always meet the same 4GB region requirement.
5446 * The actual requirement is not alignment as such: the start and end
5447 * DMA addresses must share the same upper 32 bits.
5448 */
5449 if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
5450 /* Release sense pool & reallocate */
5451 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5452 dma_pool_destroy(ioc->sense_dma_pool);
5455 ioc->sense_dma_pool =
5456 dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5457 roundup_pow_of_two(sz), 0);
5458 if (!ioc->sense_dma_pool) {
5459 ioc_err(ioc, "sense pool: pci_pool_create failed\n");
5462 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5465 ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
5470 "sense pool(0x%p)- dma(0x%llx): depth(%d),"
5471 "element_size(%d), pool_size(%d kB)\n",
5472 ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
5473 SCSI_SENSE_BUFFERSIZE, sz / 1024);
5477 /* reply pool, 4 byte align */
5478 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
5479 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
5481 if (!ioc->reply_dma_pool) {
5482 ioc_err(ioc, "reply pool: dma_pool_create failed\n");
5485 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5488 ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
5491 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5492 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5494 ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5495 ioc->reply, ioc->reply_free_queue_depth,
5496 ioc->reply_sz, sz / 1024));
5498 ioc_info(ioc, "reply_dma(0x%llx)\n",
5499 (unsigned long long)ioc->reply_dma));
5502 /* reply free queue, 16 byte align */
5503 sz = ioc->reply_free_queue_depth * 4;
5504 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
5505 &ioc->pdev->dev, sz, 16, 0);
5506 if (!ioc->reply_free_dma_pool) {
5507 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
5510 ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
5511 &ioc->reply_free_dma);
5512 if (!ioc->reply_free) {
5513 ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
5517 ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5518 ioc->reply_free, ioc->reply_free_queue_depth,
5521 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
5522 (unsigned long long)ioc->reply_free_dma));
5525 if (ioc->rdpq_array_enable) {
5526 reply_post_free_array_sz = ioc->reply_queue_count *
5527 sizeof(Mpi2IOCInitRDPQArrayEntry);
5528 ioc->reply_post_free_array_dma_pool =
5529 dma_pool_create("reply_post_free_array pool",
5530 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5531 if (!ioc->reply_post_free_array_dma_pool) {
5533 ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
5536 ioc->reply_post_free_array =
5537 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5538 GFP_KERNEL, &ioc->reply_post_free_array_dma);
5539 if (!ioc->reply_post_free_array) {
5541 ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
5545 ioc->config_page_sz = 512;
5546 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
5547 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
5548 if (!ioc->config_page) {
5549 ioc_err(ioc, "config page: dma_pool_alloc failed\n");
5553 ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
5554 ioc->config_page, (unsigned long long)ioc->config_page_dma,
5555 ioc->config_page_sz);
5556 total_sz += ioc->config_page_sz;
5558 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
5560 ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
5561 ioc->shost->can_queue, facts->RequestCredit);
5562 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
5563 ioc->shost->sg_tablesize);
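/*
 * Editorial sketch (not driver code): the NVMe PRP block-count formula from
 * the comment earlier in this function, with a worked example. Each PRP
 * entry is 8 bytes and the last slot of every page holds a chain pointer
 * rather than a data entry. For sg_tablesize = 128 and 4096-byte pages:
 * ((128 * 8) - 1) / (4096 - 8) + 1 = 1023 / 4088 + 1 = 1 block per I/O.
 */
static unsigned int nvme_blocks_needed(unsigned int sg_tablesize,
				       unsigned int page_size)
{
	const unsigned int prp_size = 8;	/* NVME_PRP_SIZE */

	return ((sg_tablesize * prp_size) - 1) / (page_size - prp_size) + 1;
}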
5571 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
5572 * @ioc: Pointer to MPT3SAS_ADAPTER structure
5573 * @cooked: Request raw or cooked IOC state
5575 * Return: all IOC Doorbell register bits if cooked==0, else just the
5576 * Doorbell bits in MPI2_IOC_STATE_MASK.
5579 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
5583 s = ioc->base_readl(&ioc->chip->Doorbell);
5584 sc = s & MPI2_IOC_STATE_MASK;
5585 return cooked ? sc : s;
5589 * _base_wait_on_iocstate - waiting on a particular ioc state
5591 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
5592 * @timeout: timeout in seconds
5594 * Return: 0 for success, non-zero for failure.
5597 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5603 cntdn = 1000 * timeout;
5604 do {
5605 current_state = mpt3sas_base_get_iocstate(ioc, 1);
5606 if (current_state == ioc_state)
5607 return 0;
5608 if (count && current_state == MPI2_IOC_STATE_FAULT)
5609 break;
5610 if (count && current_state == MPI2_IOC_STATE_COREDUMP)
5611 break;
5613 usleep_range(1000, 1500);
5614 count++;
5615 } while (--cntdn);
5617 return current_state;
5621 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
5622 * a write to the doorbell)
5623 * @ioc: per adapter object
5625 * Return: 0 for success, non-zero for failure.
5627 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5631 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5637 cntdn = 1000 * timeout;
5639 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5640 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5642 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5643 __func__, count, timeout));
5647 usleep_range(1000, 1500);
5651 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5652 __func__, count, int_status);
5657 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5663 cntdn = 2000 * timeout;
5665 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5666 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5668 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5669 __func__, count, timeout));
5677 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5678 __func__, count, int_status);
5684 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
5685 * @ioc: per adapter object
5686 * @timeout: timeout in seconds
5688 * Return: 0 for success, non-zero for failure.
5690 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
5694 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
5701 cntdn = 1000 * timeout;
5703 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5704 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5706 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5707 __func__, count, timeout));
5709 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5710 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
5711 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5712 MPI2_IOC_STATE_FAULT) {
5713 mpt3sas_print_fault_code(ioc, doorbell);
5716 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5717 MPI2_IOC_STATE_COREDUMP) {
5718 mpt3sas_print_coredump_info(ioc, doorbell);
5721 } else if (int_status == 0xFFFFFFFF)
5724 usleep_range(1000, 1500);
5729 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5730 __func__, count, int_status);
5735 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5736 * @ioc: per adapter object
5737 * @timeout: timeout in seconds
5739 * Return: 0 for success, non-zero for failure.
5742 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5748 cntdn = 1000 * timeout;
5750 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5751 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5753 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5754 __func__, count, timeout));
5758 usleep_range(1000, 1500);
5762 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5763 __func__, count, doorbell_reg);
5768 * _base_send_ioc_reset - send doorbell reset
5769 * @ioc: per adapter object
5770 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
5771 * @timeout: timeout in seconds
5773 * Return: 0 for success, non-zero for failure.
5776 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5780 unsigned long flags;
5782 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5783 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
5787 if (!(ioc->facts.IOCCapabilities &
5788 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5791 ioc_info(ioc, "sending message unit reset !!\n");
5793 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5794 &ioc->chip->Doorbell);
5795 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5800 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5802 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5803 __func__, ioc_state);
5809 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5810 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5812 * Wait for IOC state CoreDump to clear only during
5813 * HBA initialization & release time.
5815 if ((ioc_state & MPI2_IOC_STATE_MASK) ==
5816 MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
5817 ioc->fault_reset_work_q == NULL)) {
5818 spin_unlock_irqrestore(
5819 &ioc->ioc_reset_in_progress_lock, flags);
5820 mpt3sas_print_coredump_info(ioc, ioc_state);
5821 mpt3sas_base_wait_for_coredump_completion(ioc,
5824 &ioc->ioc_reset_in_progress_lock, flags);
5826 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5828 ioc_info(ioc, "message unit reset: %s\n",
5829 r == 0 ? "SUCCESS" : "FAILED");
5834 * mpt3sas_wait_for_ioc - wait for the IOC to become operational.
5835 * @ioc: per adapter object
5836 * @timeout: timeout in seconds
5838 * Waits up to @timeout seconds for the IOC to
5839 * become operational.
5840 * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
5844 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5846 int wait_state_count = 0;
5847 u32 ioc_state;
5849 do {
5850 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5851 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5852 break;
5853 ssleep(1);
5854 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5855 __func__, ++wait_state_count);
5856 } while (--timeout);
5858 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5861 if (wait_state_count)
5862 ioc_info(ioc, "ioc is operational\n");
5867 * _base_handshake_req_reply_wait - send request thru doorbell interface
5868 * @ioc: per adapter object
5869 * @request_bytes: request length
5870 * @request: pointer having request payload
5871 * @reply_bytes: reply length
5872 * @reply: pointer to reply payload
5873 * @timeout: timeout in seconds
5875 * Return: 0 for success, non-zero for failure.
5878 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5879 u32 *request, int reply_bytes, u16 *reply, int timeout)
5881 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5886 /* make sure doorbell is not in use */
5887 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5888 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5892 /* clear pending doorbell interrupts from previous state changes */
5893 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5894 MPI2_HIS_IOC2SYS_DB_STATUS)
5895 writel(0, &ioc->chip->HostInterruptStatus);
5897 /* send message to ioc */
5898 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5899 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5900 &ioc->chip->Doorbell);
5902 if ((_base_spin_on_doorbell_int(ioc, 5))) {
5903 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5907 writel(0, &ioc->chip->HostInterruptStatus);
5909 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5910 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5915 /* send message 32-bits at a time */
5916 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5917 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
5918 if ((_base_wait_for_doorbell_ack(ioc, 5)))
5923 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5928 /* now wait for the reply */
5929 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5930 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5935 /* read the first two 16-bit words; they give the total length of the reply */
5936 reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5937 & MPI2_DOORBELL_DATA_MASK);
5938 writel(0, &ioc->chip->HostInterruptStatus);
5939 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5940 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5944 reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5945 & MPI2_DOORBELL_DATA_MASK);
5946 writel(0, &ioc->chip->HostInterruptStatus);
5948 for (i = 2; i < default_reply->MsgLength * 2; i++) {
5949 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5950 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5954 if (i >= reply_bytes/2) /* overflow case */
5955 ioc->base_readl(&ioc->chip->Doorbell);
5957 reply[i] = le16_to_cpu(
5958 ioc->base_readl(&ioc->chip->Doorbell)
5959 & MPI2_DOORBELL_DATA_MASK);
5960 writel(0, &ioc->chip->HostInterruptStatus);
5963 _base_wait_for_doorbell_int(ioc, 5);
5964 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5966 ioc_info(ioc, "doorbell is in use (line=%d)\n",
5969 writel(0, &ioc->chip->HostInterruptStatus);
5971 if (ioc->logging_level & MPT_DEBUG_INIT) {
5972 mfp = (__le32 *)reply;
5973 pr_info("\toffset:data\n");
5974 for (i = 0; i < reply_bytes/4; i++)
5975 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
5976 le32_to_cpu(mfp[i]));
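/*
 * Editorial sketch (not driver code): the shape of the reply phase above.
 * The reply arrives one acknowledged 16-bit word per doorbell interrupt;
 * MsgLength (byte 2 of the MPI header, i.e. assumed to be the low byte of
 * the second word after byte-swapping) gives the reply size in dwords, so
 * MsgLength * 2 words are drained in total and words beyond the caller's
 * buffer are read and discarded. read_doorbell_word() is a hypothetical
 * stand-in for the driver's wait/read/ack sequence.
 */
#include <stdint.h>

extern uint16_t read_doorbell_word(void);	/* hypothetical helper */

static void read_handshake_reply(uint16_t *reply, int reply_bytes)
{
	int i, total_words;

	reply[0] = read_doorbell_word();
	reply[1] = read_doorbell_word();
	total_words = (reply[1] & 0xFF) * 2;	/* MsgLength is in dwords */

	for (i = 2; i < total_words; i++) {
		uint16_t word = read_doorbell_word();

		if (i < reply_bytes / 2)	/* overflow case: drop word */
			reply[i] = word;
	}
}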
5982 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
5983 * @ioc: per adapter object
5984 * @mpi_reply: the reply payload from FW
5985 * @mpi_request: the request payload sent to FW
5987 * The SAS IO Unit Control Request message allows the host to perform
5988 * low-level operations such as resets on the PHYs of the IO Unit. It also
5989 * allows the host to obtain the IOC-assigned device handle for a device,
5990 * if it has other identifying information about the device, and to
5991 * remove IOC resources associated with the device.
5993 * Return: 0 for success, non-zero for failure.
5996 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5997 Mpi2SasIoUnitControlReply_t *mpi_reply,
5998 Mpi2SasIoUnitControlRequest_t *mpi_request)
6005 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6007 mutex_lock(&ioc->base_cmds.mutex);
6009 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6010 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6015 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6019 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6021 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6027 ioc->base_cmds.status = MPT3_CMD_PENDING;
6028 request = mpt3sas_base_get_msg_frame(ioc, smid);
6029 ioc->base_cmds.smid = smid;
6030 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
6031 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6032 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
6033 ioc->ioc_link_reset_in_progress = 1;
6034 init_completion(&ioc->base_cmds.done);
6035 ioc->put_smid_default(ioc, smid);
6036 wait_for_completion_timeout(&ioc->base_cmds.done,
6037 msecs_to_jiffies(10000));
6038 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6039 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
6040 ioc->ioc_link_reset_in_progress)
6041 ioc->ioc_link_reset_in_progress = 0;
6042 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6043 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
6044 mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
6046 goto issue_host_reset;
6048 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6049 memcpy(mpi_reply, ioc->base_cmds.reply,
6050 sizeof(Mpi2SasIoUnitControlReply_t));
6052 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
6053 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6058 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6059 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6062 mutex_unlock(&ioc->base_cmds.mutex);
6067 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
6068 * @ioc: per adapter object
6069 * @mpi_reply: the reply payload from FW
6070 * @mpi_request: the request payload sent to FW
6072 * The SCSI Enclosure Processor request message causes the IOC to
6073 * communicate with SES devices to control LED status signals.
6075 * Return: 0 for success, non-zero for failure.
6078 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
6079 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
6086 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6088 mutex_lock(&ioc->base_cmds.mutex);
6090 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6091 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6096 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6100 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6102 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6108 ioc->base_cmds.status = MPT3_CMD_PENDING;
6109 request = mpt3sas_base_get_msg_frame(ioc, smid);
6110 ioc->base_cmds.smid = smid;
6111 memset(request, 0, ioc->request_sz);
6112 memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
6113 init_completion(&ioc->base_cmds.done);
6114 ioc->put_smid_default(ioc, smid);
6115 wait_for_completion_timeout(&ioc->base_cmds.done,
6116 msecs_to_jiffies(10000));
6117 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6118 mpt3sas_check_cmd_timeout(ioc,
6119 ioc->base_cmds.status, mpi_request,
6120 sizeof(Mpi2SepRequest_t)/4, issue_reset);
6121 goto issue_host_reset;
6123 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6124 memcpy(mpi_reply, ioc->base_cmds.reply,
6125 sizeof(Mpi2SepReply_t));
6127 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
6128 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6133 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6134 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6137 mutex_unlock(&ioc->base_cmds.mutex);
6142 * _base_get_port_facts - obtain port facts reply and save in ioc
6143 * @ioc: per adapter object
6146 * Return: 0 for success, non-zero for failure.
6149 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
6151 Mpi2PortFactsRequest_t mpi_request;
6152 Mpi2PortFactsReply_t mpi_reply;
6153 struct mpt3sas_port_facts *pfacts;
6154 int mpi_reply_sz, mpi_request_sz, r;
6156 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6158 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
6159 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
6160 memset(&mpi_request, 0, mpi_request_sz);
6161 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
6162 mpi_request.PortNumber = port;
6163 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6164 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6167 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6171 pfacts = &ioc->pfacts[port];
6172 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
6173 pfacts->PortNumber = mpi_reply.PortNumber;
6174 pfacts->VP_ID = mpi_reply.VP_ID;
6175 pfacts->VF_ID = mpi_reply.VF_ID;
6176 pfacts->MaxPostedCmdBuffers =
6177 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
6183 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
6184 * @ioc: per adapter object
6187 * Return: 0 for success, non-zero for failure.
6190 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
6195 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6197 if (ioc->pci_error_recovery) {
6199 ioc_info(ioc, "%s: host in pci error recovery\n",
6204 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6206 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6207 __func__, ioc_state));
6209 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
6210 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6213 if (ioc_state & MPI2_DOORBELL_USED) {
6214 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6215 goto issue_diag_reset;
6218 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6219 mpt3sas_print_fault_code(ioc, ioc_state &
6220 MPI2_DOORBELL_DATA_MASK);
6221 goto issue_diag_reset;
6222 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6223 MPI2_IOC_STATE_COREDUMP) {
6225 "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
6226 __func__, ioc_state);
6230 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6233 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6234 __func__, ioc_state));
6239 rc = _base_diag_reset(ioc);
/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCFactsRequest_t mpi_request;
	Mpi2IOCFactsReply_t mpi_reply;
	struct mpt3sas_facts *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	r = _base_wait_for_iocstate(ioc, 10);
	if (r) {
		dfailprintk(ioc,
			    ioc_info(ioc, "%s: failed getting to correct state\n",
				     __func__));
		return r;
	}
	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

	if (r != 0) {
		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
		return r;
	}

	facts = &ioc->facts;
	memset(facts, 0, sizeof(struct mpt3sas_facts));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
	if (ioc->msix_enable && (facts->MaxMSIxVectors <=
	    MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
		ioc->combined_reply_queue = 0;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	if ((facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
		ioc->rdpq_array_capable = 1;
	if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
	    && ioc->is_aero_ioc)
		ioc->atomic_desc_capable = 1;
	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize =
	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		facts->IOCMaxChainSegmentSize =
			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
	}
	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit =
	    le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;

	/*
	 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
	 */
	ioc->page_size = 1 << facts->CurrentHostPageSize;
	if (ioc->page_size == 1) {
		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
	}
	dinitprintk(ioc,
		    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
			     facts->CurrentHostPageSize));

	dinitprintk(ioc,
		    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
			     facts->RequestCredit, facts->MaxChainDepth));
	dinitprintk(ioc,
		    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
			     facts->IOCRequestFrameSize * 4,
			     facts->ReplyFrameSize * 4));
	return 0;
}
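/*
 * For reference: CurrentHostPageSize is reported by the firmware as a
 * power-of-two exponent, so ioc->page_size = 1 << exponent. An exponent
 * of 12 therefore selects 4096-byte pages (MPT3SAS_HOST_PAGE_SIZE_4K),
 * and an exponent of 0 (page_size == 1) is treated as "not reported"
 * and defaulted to 4k above.
 */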
/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCInitRequest_t mpi_request;
	Mpi2IOCInitReply_t mpi_reply;
	int i, r = 0;
	ktime_t current_time;
	u16 ioc_status;
	u32 reply_post_free_array_sz = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	mpi_request.VF_ID = 0; /* TODO */
	mpi_request.VP_ID = 0;
	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;

	if (_base_is_controller_msix_enabled(ioc))
		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
	mpi_request.ReplyDescriptorPostQueueDepth =
	    cpu_to_le16(ioc->reply_post_queue_depth);
	mpi_request.ReplyFreeQueueDepth =
	    cpu_to_le16(ioc->reply_free_queue_depth);

	mpi_request.SenseBufferAddressHigh =
	    cpu_to_le32((u64)ioc->sense_dma >> 32);
	mpi_request.SystemReplyAddressHigh =
	    cpu_to_le32((u64)ioc->reply_dma >> 32);
	mpi_request.SystemRequestFrameBaseAddress =
	    cpu_to_le64((u64)ioc->request_dma);
	mpi_request.ReplyFreeQueueAddress =
	    cpu_to_le64((u64)ioc->reply_free_dma);

	if (ioc->rdpq_array_enable) {
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
		for (i = 0; i < ioc->reply_queue_count; i++)
			ioc->reply_post_free_array[i].RDPQBaseAddress =
			    cpu_to_le64(
				(u64)ioc->reply_post[i].reply_post_free_dma);
		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
	} else {
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
	}

	/*
	 * Set the flag to enable CoreDump state feature in IOC firmware.
	 */
	mpi_request.ConfigurationFlags |=
	    cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);

	/* This time stamp specifies number of milliseconds
	 * since epoch ~ midnight January 1, 1970.
	 */
	current_time = ktime_get_real();
	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		__le32 *mfp;
		int i;

		mfp = (__le32 *)&mpi_request;
		ioc_info(ioc, "\toffset:data\n");
		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}

	r = _base_handshake_req_reply_wait(ioc,
	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);

	if (r != 0) {
		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
		return r;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
	    mpi_reply.IOCLogInfo) {
		ioc_err(ioc, "%s: failed\n", __func__);
		r = -EIO;
	}

	return r;
}
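/*
 * A short note on the two reply-post addressing modes used above: in
 * RDPQ array mode each reply descriptor post queue gets its own DMA
 * allocation, the per-queue base addresses are handed to the firmware
 * through an array of Mpi2IOCInitRDPQArrayEntry elements, and
 * MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE tells the IOC to treat
 * ReplyDescriptorPostQueueAddress as the address of that array rather
 * than of a queue. Without RDPQ, all queues live in one contiguous
 * region whose base address is passed directly.
 */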
/**
 * mpt3sas_port_enable_done - command completion routine for port enable
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;

	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;

	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
		return 1;

	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		ioc->port_enable_failed = 1;

	if (ioc->is_driver_loading) {
		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
			mpt3sas_port_enable_complete(ioc);
			return 1;
		} else {
			ioc->start_scan_failed = ioc_status;
			ioc->start_scan = 0;
			return 1;
		}
	}
	complete(&ioc->port_enable_cmds.done);
	return 1;
}
/**
 * _base_send_port_enable - send port_enable (discovery stuff) to firmware
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	Mpi2PortEnableReply_t *mpi_reply;
	int r = 0;
	u16 smid;
	u16 ioc_status;

	ioc_info(ioc, "sending port enable !!\n");

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	init_completion(&ioc->port_enable_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2PortEnableRequest_t)/4);
		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
		goto out;
	}

	mpi_reply = ioc->port_enable_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
			__func__, ioc_status);
		r = -EFAULT;
		goto out;
	}

 out:
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
	return r;
}
/**
 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	u16 smid;

	ioc_info(ioc, "sending port enable !!\n");

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	ioc->put_smid_default(ioc, smid);
	return 0;
}
/**
 * _base_determine_wait_on_discovery - disposition
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Return: 1 for wait, 0 for don't wait.
 */
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
{
	/* We wait for discovery to complete if IR firmware is loaded.
	 * The sas topology events arrive before PD events, so we need time to
	 * turn on the bit in ioc->pd_handles to indicate PD.
	 * Also, it may be required to report Volumes ahead of physical
	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
	 */
	if (ioc->ir_firmware)
		return 1;

	/* if no Bios, then we don't need to wait */
	if (!ioc->bios_pg3.BiosVersion)
		return 0;

	/* Bios is present, then we drop down here.
	 *
	 * If there are any entries in the Bios Page 2, then we wait
	 * for discovery to complete.
	 */

	/* Current Boot Device */
	if ((ioc->bios_pg2.CurrentBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Request Boot Device */
	   (ioc->bios_pg2.ReqBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Alternate Request Boot Device */
	   (ioc->bios_pg2.ReqAltBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
		return 0;

	return 1;
}
/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */
static void
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u32 desired_event;

	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));

	if (event < 32)
		ioc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		ioc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		ioc->event_masks[2] &= ~desired_event;
	else if (event < 128)
		ioc->event_masks[3] &= ~desired_event;
}
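/*
 * Worked example: the event mask is four 32-bit words in which a set
 * bit suppresses an event and a cleared bit enables it. For
 * MPI2_EVENT_SAS_DISCOVERY (0x16 == 22) the code above clears bit 22
 * of ioc->event_masks[0]; an event code of 40 would clear bit 8 of
 * ioc->event_masks[1].
 */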
/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
	} else
		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	return r;
}
/**
 * mpt3sas_base_validate_event_type - validating event types
 * @ioc: per adapter object
 * @event_type: firmware event
 *
 * This will turn on firmware event notification when an application
 * asks for that event. We don't mask events that are already enabled.
 */
void
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
{
	int i, j;
	u32 event_mask, desired_event;
	u8 send_update_to_fw;

	for (i = 0, send_update_to_fw = 0; i <
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		event_mask = ~event_type[i];
		desired_event = 1;
		for (j = 0; j < 32; j++) {
			if (!(event_mask & desired_event) &&
			    (ioc->event_masks[i] & desired_event)) {
				ioc->event_masks[i] &= ~desired_event;
				send_update_to_fw = 1;
			}
			desired_event = (desired_event << 1);
		}
	}

	if (!send_update_to_fw)
		return;

	mutex_lock(&ioc->base_cmds.mutex);
	_base_event_notification(ioc);
	mutex_unlock(&ioc->base_cmds.mutex);
}
/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	ioc_info(ioc, "sending diag reset !!\n");

	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		if (count++ > 20) {
			ioc_info(ioc,
			    "Stop writing magic sequence after 20 retries\n");
			goto out;
		}

		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc,
			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
				   count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);

	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	     &ioc->chip->HostDiagnostic);

	/* This delay allows the chip PCIe hardware time to finish reset tasks */
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);

		if (host_diagnostic == 0xFFFFFFFF) {
			ioc_info(ioc,
			    "Invalid host diagnostic register value\n");
			goto out;
		}
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc,
			  ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	drsprintk(ioc,
		  ioc_info(ioc, "disable writes to the diagnostic register\n"));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
			__func__, ioc_state);
		goto out;
	}

	ioc_info(ioc, "diag reset: SUCCESS\n");
	return 0;

 out:
	ioc_err(ioc, "diag reset: FAILED\n");
	return -EFAULT;
}
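/*
 * Background for the sequence above: the WriteSequence "magic" keys
 * unlock the HostDiagnostic register (DIAG_WRITE_ENABLE), after which
 * the host may set RESET_ADAPTER; that bit self-clears once the chip
 * has completed the reset. If the controller comes back with the HCB
 * mode bit set, the boot device select is pointed back at the HCDW so
 * the adapter restarts from the firmware image the HCB address points
 * to. Writing the flush key at the end re-locks the register.
 */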
/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc,
		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
			   __func__, ioc_state));

	/* if in RESET state, it should move to READY state shortly */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			if (count++ == 10) {
				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
					__func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		ioc_info(ioc, "unexpected doorbell active!\n");
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
		/*
		 * If host reset is invoked while the watchdog thread is
		 * waiting for the IOC state to change to Fault, then the
		 * driver has to wait here for the CoreDump state to clear;
		 * otherwise the reset would be issued to the FW, and the FW
		 * would move the IOC to RESET state without copying the FW
		 * logs to the coredump region.
		 */
		if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
			mpt3sas_print_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc,
			    __func__);
		}
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15)))
			return 0;

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index, rc;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
					reply_address, i);
	}

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r) {
		/*
		 * No need to check IOC state for fault state & issue
		 * diag reset during host reset. This check is needed
		 * only during driver load time.
		 */
		if (!ioc->is_driver_loading)
			return r;

		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_send_ioc_init(ioc)))
			return r;
	}

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7)<<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	_base_static_config_pages(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	if (ioc->is_driver_loading) {

		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
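/*
 * Note on the ULLONG_MAX fill above: a reply descriptor of all ones is
 * the "unused" pattern, so pre-filling every reply post queue with
 * 0xFFFFFFFFFFFFFFFF lets the interrupt handler recognize where the
 * valid descriptors end on each pass through a queue.
 */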
/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* synchronizing freeing resource with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
}
/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, rc;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->smp_affinity_enable = smp_affinity_enable;

	ioc->rdpq_array_enable_assigned = 0;
	ioc->use_32bit_dma = false;
	if (ioc->is_aero_ioc)
		ioc->base_readl = &_base_readl_aero;
	else
		ioc->base_readl = &_base_readl;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_free_resources;
	}

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter
		 * gather elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		if (ioc->high_iops_queues)
			ioc->get_msix_index_for_smlio =
					&_base_get_high_iops_msix_index;
		else
			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	}
	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path =
				&_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority =
				&_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		if (ioc->is_mcpu_endpoint)
			ioc->put_smid_scsi_io =
				&_base_put_smid_mpi_ep_scsi_io;
		else
			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
	}
	/*
	 * These function pointers are for the other requests that don't
	 * require IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;
	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r) {
			rc = _base_check_for_fault_and_issue_reset(ioc);
			if (rc || (_base_get_port_facts(ioc, i)))
				goto out_free_resources;
		}
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	if (irqpoll_weight > 0)
		ioc->thresh_hold = irqpoll_weight;
	else
		ioc->thresh_hold = ioc->hba_queue_depth/4;

	_base_init_irqpolls(ioc);
	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}
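	/*
	 * Sizing note: pd_handles, blocking_handles, pend_os_device_add
	 * and device_remove_in_progress above are bitmaps with one bit
	 * per device handle, so each is MaxDevHandle/8 bytes rounded up;
	 * e.g. a MaxDevHandle of 1024 needs 128 bytes.
	 */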
	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}
	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	/*
	 * Save a copy of the current IOCFacts in prev_fw_facts; it is
	 * used during online firmware upgrade.
	 */
	memcpy(&ioc->prev_fw_facts, &ioc->facts,
	    sizeof(struct mpt3sas_facts));

	ioc->non_operational_loop = 0;
	ioc->ioc_coredump_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);

	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	return r;
}
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
/**
 * _base_pre_reset_handler - pre reset handler
 * @ioc: per adapter object
 */
static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_pre_reset_handler(ioc);
	mpt3sas_ctl_pre_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
 * @ioc: per adapter object
 */
static void
_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			ioc->start_scan_failed =
				MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
			ioc->port_enable_cmds.status =
				MPT3_CMD_NOT_USED;
		} else {
			complete(&ioc->port_enable_cmds.done);
		}
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}
/**
 * _base_clear_outstanding_commands - clear all outstanding commands
 * @ioc: per adapter object
 */
static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
	_base_clear_outstanding_mpt_commands(ioc);
}

/**
 * _base_reset_done_handler - reset done handler
 * @ioc: per adapter object
 */
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
/**
 * mpt3sas_wait_for_commands_to_complete - wait for outstanding commands
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits up to 10 seconds for all pending commands to
 * complete prior to putting the controller in reset.
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
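/*
 * The count used above is sampled from scsi_host_busy(); the command
 * completion path re-samples it (see _base_recovery_check() elsewhere
 * in this file) and wakes reset_wq once the outstanding I/O count
 * drains to zero, so the wait normally ends well before the 10 second
 * cap.
 */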
/**
 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
 *     attributes during online firmware upgrade and update the corresponding
 *     IOC variables accordingly.
 *
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
{
	u16 pd_handles_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;

	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;

		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
		    GFP_KERNEL);
		if (!pd_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pd_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pd_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;

		blocking_handles = krealloc(ioc->blocking_handles,
		    pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for blocking_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		ioc->pd_handles_sz = pd_handles_sz;

		pend_os_device_add = krealloc(ioc->pend_os_device_add,
		    pd_handles_sz, GFP_KERNEL);
		if (!pend_os_device_add) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		    (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;

		device_remove_in_progress = krealloc(
		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
		if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		    ioc->device_remove_in_progress_sz, 0,
		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
	}

	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
	return 0;
}
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

	if (ioc->pci_error_recovery) {
		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	mutex_lock(&ioc->reset_in_progress_mutex);

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
		    (ioc_state & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP)
			is_fault = 1;
	}
	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_clear_outstanding_commands(ioc);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	r = _base_check_ioc_facts_changes(ioc);
	if (r) {
		ioc_info(ioc,
		    "Some of the parameters got changed in this new firmware image and it requires system reboot\n");
		goto out;
	}
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. Please reboot the system and ensure that the correct firmware version is running\n",
		      ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
	return r;
}