2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6 * Copyright (C) 2012-2014 LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/kdev_t.h>
54 #include <linux/blkdev.h>
55 #include <linux/delay.h>
56 #include <linux/interrupt.h>
57 #include <linux/dma-mapping.h>
59 #include <linux/time.h>
60 #include <linux/ktime.h>
61 #include <linux/kthread.h>
62 #include <asm/page.h> /* To get host page size per arch */
63 #include <linux/aer.h>
66 #include "mpt3sas_base.h"
68 static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
71 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
73 /* maximum controller queue depth */
74 #define MAX_HBA_QUEUE_DEPTH 30000
75 #define MAX_CHAIN_DEPTH 100000
76 static int max_queue_depth = -1;
77 module_param(max_queue_depth, int, 0444);
78 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
80 static int max_sgl_entries = -1;
81 module_param(max_sgl_entries, int, 0444);
82 MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
84 static int msix_disable = -1;
85 module_param(msix_disable, int, 0444);
86 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
88 static int smp_affinity_enable = 1;
89 module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enabled (1)");
92 static int max_msix_vectors = -1;
93 module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors");
97 static int irqpoll_weight = -1;
98 module_param(irqpoll_weight, int, 0444);
99 MODULE_PARM_DESC(irqpoll_weight,
100 "irq poll weight (default= one fourth of HBA queue depth)");
102 static int mpt3sas_fwfault_debug;
103 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
104 " enable detection of firmware fault and halt firmware - (default=0)");
106 static int perf_mode = -1;
107 module_param(perf_mode, int, 0444);
108 MODULE_PARM_DESC(perf_mode,
109 "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
110 "0 - balanced: high iops mode is enabled &\n\t\t"
111 "interrupt coalescing is enabled only on high iops queues,\n\t\t"
112 "1 - iops: high iops mode is disabled &\n\t\t"
113 "interrupt coalescing is enabled on all queues,\n\t\t"
114 "2 - latency: high iops mode is disabled &\n\t\t"
115 "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
	"\t\tdefault - default perf_mode is 'balanced'"
	);
119 enum mpt3sas_perf_mode {
120 MPT_PERF_MODE_DEFAULT = -1,
121 MPT_PERF_MODE_BALANCED = 0,
122 MPT_PERF_MODE_IOPS = 1,
	MPT_PERF_MODE_LATENCY = 2,
};
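/*
 * Example (editor's note, hedged): perf_mode is read-only at runtime
 * (0444), so a typical way to select latency mode is at load time, e.g.
 * "modprobe mpt3sas perf_mode=2", or "mpt3sas.perf_mode=2" on the kernel
 * command line for a built-in driver. Leaving it unset keeps
 * MPT_PERF_MODE_DEFAULT (-1), which resolves to 'balanced' on Aero/Sea
 * controllers per the description above.
 */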
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
	u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
/**
 * mpt3sas_base_check_cmd_timeout - check a command for timeout and
 *	for termination caused by a host reset
 *
 * @ioc: per adapter object.
 * @status: status of the issued command.
 * @mpi_request: mf request pointer.
 * @sz: size of buffer.
 *
 * Return: 1 if a reset should be issued, 0 otherwise.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
	u8 status, void *mpi_request, int sz)
{
	u8 issue_reset = 0;

	if (!(status & MPT3_CMD_RESET))
		issue_reset = 1;

	ioc_err(ioc, "Command %s\n",
	    issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
	_debug_dump_mf(mpi_request, sz);

	return issue_reset;
}
163 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
170 _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* global ioc spinlock to protect controller list on list operations */
	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
186 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
187 param_get_int, &mpt3sas_fwfault_debug, 0644);
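/*
 * Editor's note (hedged): because this parameter is registered with mode
 * 0644 and a custom setter, it can be toggled at runtime, e.g.
 *
 *	echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * The setter above then propagates the new value to every ioc on
 * mpt3sas_ioc_list under gioc_lock.
 */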
 * _base_readl_aero - retry readl up to three times
191 * @addr: MPT Fusion system interface register address
 * Retry the readl() up to three times if it returns a zero value
 * while reading the system interface register.
197 _base_readl_aero(const volatile void __iomem *addr)
202 ret_val = readl(addr);
204 } while (ret_val == 0 && i < 3);
210 _base_readl(const volatile void __iomem *addr)
216 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
219 * @ioc: per adapter object
220 * @reply: reply message frame(lower 32bit addr)
221 * @index: System request message index.
224 _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
	 * The first 256 bytes of BAR0 are system registers. MPI frames
	 * start at offset 256; at most 32 MPI frames of 128 bytes each
	 * are supported (32 * 128 = 4K). The clone of the reply free
	 * pool for the mCPU starts right after that.
232 u16 cmd_credit = ioc->facts.RequestCredit + 1;
233 void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
234 MPI_FRAME_START_OFFSET +
235 (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
237 writel(reply, reply_free_iomem);
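/*
 * Worked example (editor's note, using the maxCredit 32 BAR0 layout
 * described later in this file): with RequestCredit = 31 (cmd_credit = 32)
 * and request_sz = 128, reply_free_iomem = BAR0 + 256 + 32 * 128 +
 * index * 4 = BAR0 + 4352 + index * 4, which falls inside the
 * 4352 - 4864 reply free pool region of the BAR0 map.
 */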
241 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
242 * to system/BAR0 region.
244 * @dst_iomem: Pointer to the destination location in BAR0 space.
245 * @src: Pointer to the Source data.
246 * @size: Size of data to be copied.
249 _base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
252 u32 *src_virt_mem = (u32 *)src;
254 for (i = 0; i < size/4; i++)
255 writel((u32)src_virt_mem[i],
256 (void __iomem *)dst_iomem + (i * 4));
260 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
262 * @dst_iomem: Pointer to the destination location in BAR0 space.
263 * @src: Pointer to the Source data.
264 * @size: Size of data to be copied.
267 _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
270 u32 *src_virt_mem = (u32 *)(src);
272 for (i = 0; i < size/4; i++)
273 writel((u32)src_virt_mem[i],
274 (void __iomem *)dst_iomem + (i * 4));
278 * _base_get_chain - Calculates and Returns virtual chain address
279 * for the provided smid in BAR0 space.
281 * @ioc: per adapter object
282 * @smid: system request message index
283 * @sge_chain_count: Scatter gather chain count.
285 * Return: the chain address.
287 static inline void __iomem*
288 _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
291 void __iomem *base_chain, *chain_virt;
292 u16 cmd_credit = ioc->facts.RequestCredit + 1;
294 base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
295 (cmd_credit * ioc->request_sz) +
296 REPLY_FREE_POOL_SIZE;
297 chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
298 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
303 * _base_get_chain_phys - Calculates and Returns physical address
304 * in BAR0 for scatter gather chains, for
307 * @ioc: per adapter object
308 * @smid: system request message index
309 * @sge_chain_count: Scatter gather chain count.
311 * Return: Physical chain address.
313 static inline phys_addr_t
314 _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
317 phys_addr_t base_chain_phys, chain_phys;
318 u16 cmd_credit = ioc->facts.RequestCredit + 1;
320 base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
321 (cmd_credit * ioc->request_sz) +
322 REPLY_FREE_POOL_SIZE;
323 chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
324 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
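/*
 * Worked example (editor's note, same maxCredit 32 assumptions): the chain
 * region starts at 256 + 32 * 128 + REPLY_FREE_POOL_SIZE; with a 512 byte
 * reply free pool that is offset 4864, matching the 4864 - 17152 SGE chain
 * element region in the BAR0 map below. Each smid then owns
 * MaxChainDepth * request_sz bytes within that region.
 */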
329 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
330 * buffer address for the provided smid.
331 * (Each smid can have 64K starts from 17024)
333 * @ioc: per adapter object
334 * @smid: system request message index
336 * Return: Pointer to buffer location in BAR0.
339 static void __iomem *
340 _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	/* Added an extra 1 to reach the end of the chain region. */
	void __iomem *chain_end = _base_get_chain(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end + (smid * 64 * 1024);
}
351 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
352 * Host buffer Physical address for the provided smid.
353 * (Each smid can have 64K starts from 17024)
355 * @ioc: per adapter object
356 * @smid: system request message index
 * Return: Physical address of the buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end_phys + (smid * 64 * 1024);
}
371 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
372 * lookup list and Provides chain_buffer
373 * address for the matching dma address.
374 * (Each smid can have 64K starts from 17024)
376 * @ioc: per adapter object
377 * @chain_buffer_dma: Chain buffer dma address.
379 * Return: Pointer to chain buffer. Or Null on Failure.
382 _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
383 dma_addr_t chain_buffer_dma)
	int index, j;
	struct chain_tracker *ct;
388 for (index = 0; index < ioc->scsiio_depth; index++) {
389 for (j = 0; j < ioc->chains_needed_per_io; j++) {
390 ct = &ioc->chain_lookup[index].chains_per_smid[j];
391 if (ct && ct->chain_buffer_dma == chain_buffer_dma)
392 return ct->chain_buffer;
	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
	return NULL;
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *	are handled here. Base function for
 *	double buffering, before submitting the requests.
405 * @ioc: per adapter object.
406 * @mpi_request: mf request pointer.
407 * @smid: system request message index.
409 static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
410 void *mpi_request, u16 smid)
412 Mpi2SGESimple32_t *sgel, *sgel_next;
413 u32 sgl_flags, sge_chain_count = 0;
414 bool is_write = false;
416 void __iomem *buffer_iomem;
417 phys_addr_t buffer_iomem_phys;
418 void __iomem *buff_ptr;
419 phys_addr_t buff_ptr_phys;
420 void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
421 void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
422 phys_addr_t dst_addr_phys;
423 MPI2RequestHeader_t *request_hdr;
424 struct scsi_cmnd *scmd;
425 struct scatterlist *sg_scmd = NULL;
426 int is_scsiio_req = 0;
428 request_hdr = (MPI2RequestHeader_t *) mpi_request;
430 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
431 Mpi25SCSIIORequest_t *scsiio_request =
432 (Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
435 } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
436 Mpi2ConfigRequest_t *config_req =
437 (Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;
442 /* From smid we can get scsi_cmd, once we have sg_scmd,
	 * we just need to get sg_virt and sg_next to get the virtual
444 * address associated with sgel->Address.
448 /* Get scsi_cmd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			ioc_err(ioc, "scmd is NULL\n");
			return;
		}
455 /* Get sg_scmd from scmd provided */
456 sg_scmd = scsi_sglist(scmd);
460 * 0 - 255 System register
461 * 256 - 4352 MPI Frame. (This is based on maxCredit 32)
462 * 4352 - 4864 Reply_free pool (512 byte is reserved
463 * considering maxCredit 32. Reply need extra
464 * room, for mCPU case kept four times of
466 * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
467 * 128 byte size = 12288)
468 * 17152 - x Host buffer mapped with smid.
469 * (Each smid can have 64K Max IO.)
470 * BAR0+Last 1K MSIX Addr and Data
471 * Total size in use 2113664 bytes of 4MB BAR0
474 buffer_iomem = _base_get_buffer_bar0(ioc, smid);
475 buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
477 buff_ptr = buffer_iomem;
478 buff_ptr_phys = buffer_iomem_phys;
479 WARN_ON(buff_ptr_phys > U32_MAX);
481 if (le32_to_cpu(sgel->FlagsLength) &
	    (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = true;
485 for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
		sgl_flags =
		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
490 switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
491 case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
493 * Helper function which on passing
494 * chain_buffer_dma returns chain_buffer. Get
495 * the virtual address for sgel->Address
			sgel_next =
			    _base_get_chain_buffer_dma_to_chain_buffer(ioc,
				le32_to_cpu(sgel->Address));
			if (sgel_next == NULL)
				return;
			/*
			 * This is copying a 128 byte chain
			 * frame (not a host buffer)
			 */
			dst_chain_addr[sge_chain_count] =
			    _base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
			    (void *) sgel_next;
511 dst_addr_phys = _base_get_chain_phys(ioc,
512 smid, sge_chain_count);
513 WARN_ON(dst_addr_phys > U32_MAX);
			sgel->Address =
			    cpu_to_le32(lower_32_bits(dst_addr_phys));

			sgel = sgel_next;
			sge_chain_count++;
			break;
519 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
522 _base_clone_to_sys_mem(buff_ptr,
524 (le32_to_cpu(sgel->FlagsLength) &
				 * FIXME: this relies on a zero
531 cpu_to_le32((u32)buff_ptr_phys);
533 _base_clone_to_sys_mem(buff_ptr,
535 (le32_to_cpu(sgel->FlagsLength) &
538 cpu_to_le32((u32)buff_ptr_phys);
541 buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
543 buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
545 if ((le32_to_cpu(sgel->FlagsLength) &
546 (MPI2_SGE_FLAGS_END_OF_BUFFER
547 << MPI2_SGE_FLAGS_SHIFT)))
548 goto eob_clone_chain;
			/*
			 * Every single element in MPT will have an
			 * associated sg_next. Better to sanity-check
			 * that sg_next is not NULL, but it will be a
			 * bug if it ever is.
			 */
557 sg_scmd = sg_next(sg_scmd);
561 goto eob_clone_chain;
569 for (i = 0; i < sge_chain_count; i++) {
571 _base_clone_to_sys_mem(dst_chain_addr[i],
572 src_chain_addr[i], ioc->request_sz);
577 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
578 * @arg: input argument, used to derive ioc
 * Return: 0 if controller is removed from pci subsystem.
584 static int mpt3sas_remove_dead_ioc_func(void *arg)
586 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	pdev = ioc->pdev;
	if (pdev == NULL)
		return -1;

	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}
600 * _base_fault_reset_work - workq handling ioc fault conditions
601 * @work: input argument, used to derive ioc
606 _base_fault_reset_work(struct work_struct *work)
608 struct MPT3SAS_ADAPTER *ioc =
609 container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
613 struct task_struct *p;
616 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
617 if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
	    ioc->pci_error_recovery)
		goto rearm_timer;
620 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
622 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
623 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
624 ioc_err(ioc, "SAS host is non-operational !!!!\n");
	/* It may be possible that EEH recovery can resolve some of the
	 * pci bus failure issues rather than removing the dead ioc
	 * function, by treating the controller as being in a
	 * non-operational state. So priority is given to EEH recovery
	 * here; if it does not resolve the issue, the mpt3sas driver
	 * will declare this controller non-operational and remove the
	 * dead ioc function.
634 if (ioc->non_operational_loop++ < 5) {
635 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
641 * Call _scsih_flush_pending_cmds callback so that we flush all
	 * pending commands back to OS. This call is required to avoid
643 * deadlock at block layer. Dead IOC will fail to do diag reset,
644 * and this call is safe since dead ioc will never return any
645 * command back from HW.
647 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
649 * Set remove_host flag early since kernel thread will
650 * take some time to execute.
652 ioc->remove_host = 1;
	/* Remove the Dead Host */
654 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
655 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
	if (IS_ERR(p))
		ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			__func__);
	else
		ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			__func__);
662 return; /* don't rearm timer */
665 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
666 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
667 ioc->manu_pg11.CoreDumpTOSec :
668 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
670 timeout /= (FAULT_POLLING_INTERVAL/1000);
672 if (ioc->ioc_coredump_loop == 0) {
673 mpt3sas_print_coredump_info(ioc,
674 doorbell & MPI2_DOORBELL_DATA_MASK);
675 /* do not accept any IOs and disable the interrupts */
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
678 ioc->shost_recovery = 1;
679 spin_unlock_irqrestore(
680 &ioc->ioc_reset_in_progress_lock, flags);
681 mpt3sas_base_mask_interrupts(ioc);
682 _base_clear_outstanding_commands(ioc);
685 ioc_info(ioc, "%s: CoreDump loop %d.",
686 __func__, ioc->ioc_coredump_loop);
688 /* Wait until CoreDump completes or times out */
		if (ioc->ioc_coredump_loop++ < timeout) {
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			goto rearm_timer;
		}
	}
696 if (ioc->ioc_coredump_loop) {
697 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
698 ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
699 __func__, ioc->ioc_coredump_loop);
701 ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
702 __func__, ioc->ioc_coredump_loop);
703 ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
705 ioc->non_operational_loop = 0;
706 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
707 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
708 ioc_warn(ioc, "%s: hard reset: %s\n",
709 __func__, rc == 0 ? "success" : "failed");
710 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
711 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
712 mpt3sas_print_fault_code(ioc, doorbell &
713 MPI2_DOORBELL_DATA_MASK);
714 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
715 MPI2_IOC_STATE_COREDUMP)
716 mpt3sas_print_coredump_info(ioc, doorbell &
717 MPI2_DOORBELL_DATA_MASK);
718 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
719 MPI2_IOC_STATE_OPERATIONAL)
720 return; /* don't rearm timer */
722 ioc->ioc_coredump_loop = 0;
724 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
727 queue_delayed_work(ioc->fault_reset_work_q,
728 &ioc->fault_reset_work,
729 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
730 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
734 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
735 * @ioc: per adapter object
740 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
744 if (ioc->fault_reset_work_q)
747 /* initialize fault polling */
749 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
750 snprintf(ioc->fault_reset_work_q_name,
751 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
752 ioc->driver_name, ioc->id);
753 ioc->fault_reset_work_q =
754 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
755 if (!ioc->fault_reset_work_q) {
756 ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
759 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
760 if (ioc->fault_reset_work_q)
761 queue_delayed_work(ioc->fault_reset_work_q,
762 &ioc->fault_reset_work,
763 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
764 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
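/*
 * Editor's note: the rearm above is done while holding
 * ioc_reset_in_progress_lock and only if fault_reset_work_q is still
 * non-NULL. mpt3sas_base_stop_watchdog() below NULLs that pointer under
 * the same lock before cancelling the work, so a handler running
 * concurrently with the stop cannot re-queue itself afterwards.
 */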
768 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
769 * @ioc: per adapter object
774 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
777 struct workqueue_struct *wq;
779 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
780 wq = ioc->fault_reset_work_q;
781 ioc->fault_reset_work_q = NULL;
782 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
791 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
792 * @ioc: per adapter object
793 * @fault_code: fault code
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
798 ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
802 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
803 * @ioc: per adapter object
804 * @fault_code: fault code
809 mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
811 ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
815 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
816 * completes or times out
817 * @ioc: per adapter object
818 * @caller: caller function name
820 * Returns 0 for success, non-zero for failure.
823 mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
826 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
827 ioc->manu_pg11.CoreDumpTOSec :
828 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
830 int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
835 "%s: CoreDump timed out. (ioc_state=0x%x)\n",
839 "%s: CoreDump completed. (ioc_state=0x%x)\n",
/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00
 * to the doorbell register will halt the controller firmware. The
 * purpose is to stop both the driver and the firmware so that the
 * end user can obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;
864 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
865 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
866 mpt3sas_print_fault_code(ioc, doorbell &
867 MPI2_DOORBELL_DATA_MASK);
868 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
869 MPI2_IOC_STATE_COREDUMP) {
870 mpt3sas_print_coredump_info(ioc, doorbell &
871 MPI2_DOORBELL_DATA_MASK);
873 writel(0xC0FFEE00, &ioc->chip->Doorbell);
874 ioc_err(ioc, "Firmware is halted due to command timeout\n");
	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
885 * _base_sas_ioc_info - verbose translation of the ioc status
886 * @ioc: per adapter object
887 * @mpi_reply: reply mf payload returned from firmware
888 * @request_hdr: request mf
891 _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
892 MPI2RequestHeader_t *request_hdr)
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;
900 /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
901 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
902 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
903 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;
909 switch (ioc_status) {
911 /****************************************************************************
912 * Common IOCStatus values for all replies
913 ****************************************************************************/
915 case MPI2_IOCSTATUS_INVALID_FUNCTION:
916 desc = "invalid function";
918 case MPI2_IOCSTATUS_BUSY:
921 case MPI2_IOCSTATUS_INVALID_SGL:
922 desc = "invalid sgl";
924 case MPI2_IOCSTATUS_INTERNAL_ERROR:
925 desc = "internal error";
927 case MPI2_IOCSTATUS_INVALID_VPID:
928 desc = "invalid vpid";
930 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
931 desc = "insufficient resources";
933 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
934 desc = "insufficient power";
936 case MPI2_IOCSTATUS_INVALID_FIELD:
937 desc = "invalid field";
939 case MPI2_IOCSTATUS_INVALID_STATE:
940 desc = "invalid state";
942 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
943 desc = "op state not supported";
946 /****************************************************************************
947 * Config IOCStatus values
948 ****************************************************************************/
950 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
951 desc = "config invalid action";
953 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
954 desc = "config invalid type";
956 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
957 desc = "config invalid page";
959 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
960 desc = "config invalid data";
962 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
963 desc = "config no defaults";
965 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
966 desc = "config cant commit";
969 /****************************************************************************
971 ****************************************************************************/
973 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
974 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
975 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
976 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
977 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
978 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
979 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
980 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
981 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
982 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
983 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
984 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
987 /****************************************************************************
988 * For use by SCSI Initiator and SCSI Target end-to-end data protection
989 ****************************************************************************/
991 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
992 desc = "eedp guard error";
994 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
995 desc = "eedp ref tag error";
997 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
998 desc = "eedp app tag error";
1001 /****************************************************************************
1002 * SCSI Target values
1003 ****************************************************************************/
1005 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
1006 desc = "target invalid io index";
1008 case MPI2_IOCSTATUS_TARGET_ABORTED:
1009 desc = "target aborted";
1011 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
1012 desc = "target no conn retryable";
1014 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
1015 desc = "target no connection";
1017 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
1018 desc = "target xfer count mismatch";
1020 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
1021 desc = "target data offset error";
1023 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
1024 desc = "target too much write data";
1026 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
1027 desc = "target iu too short";
1029 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
1030 desc = "target ack nak timeout";
1032 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
1033 desc = "target nak received";
1036 /****************************************************************************
1037 * Serial Attached SCSI values
1038 ****************************************************************************/
1040 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
1041 desc = "smp request failed";
1043 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
1044 desc = "smp data overrun";
1047 /****************************************************************************
1048 * Diagnostic Buffer Post / Diagnostic Release values
1049 ****************************************************************************/
1051 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
1052 desc = "diagnostic released";
1061 switch (request_hdr->Function) {
1062 case MPI2_FUNCTION_CONFIG:
1063 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
1064 func_str = "config_page";
1066 case MPI2_FUNCTION_SCSI_TASK_MGMT:
1067 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
1068 func_str = "task_mgmt";
1070 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
1071 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
1072 func_str = "sas_iounit_ctl";
1074 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1075 frame_sz = sizeof(Mpi2SepRequest_t);
1076 func_str = "enclosure";
1078 case MPI2_FUNCTION_IOC_INIT:
1079 frame_sz = sizeof(Mpi2IOCInitRequest_t);
1080 func_str = "ioc_init";
1082 case MPI2_FUNCTION_PORT_ENABLE:
1083 frame_sz = sizeof(Mpi2PortEnableRequest_t);
1084 func_str = "port_enable";
1086 case MPI2_FUNCTION_SMP_PASSTHROUGH:
1087 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
1088 func_str = "smp_passthru";
1090 case MPI2_FUNCTION_NVME_ENCAPSULATED:
1091 frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
1093 func_str = "nvme_encapsulated";
1097 func_str = "unknown";
1101 ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
1102 desc, ioc_status, request_hdr, func_str);
1104 _debug_dump_mf(request_hdr, frame_sz/4);
 * _base_display_event_data - verbose translation of firmware async events
1109 * @ioc: per adapter object
1110 * @mpi_reply: reply mf payload returned from firmware
1113 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1114 Mpi2EventNotificationReply_t *mpi_reply)
	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;
1122 event = le16_to_cpu(mpi_reply->Event);
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
1128 case MPI2_EVENT_STATE_CHANGE:
1129 desc = "Status Change";
1131 case MPI2_EVENT_HARD_RESET_RECEIVED:
1132 desc = "Hard Reset Received";
1134 case MPI2_EVENT_EVENT_CHANGE:
1135 desc = "Event Change";
1137 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
1138 desc = "Device Status Change";
1140 case MPI2_EVENT_IR_OPERATION_STATUS:
1141 if (!ioc->hide_ir_msg)
1142 desc = "IR Operation Status";
1144 case MPI2_EVENT_SAS_DISCOVERY:
1146 Mpi2EventDataSasDiscovery_t *event_data =
1147 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
1148 ioc_info(ioc, "Discovery: (%s)",
		    event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
		    "start" : "stop");
1151 if (event_data->DiscoveryStatus)
1152 pr_cont(" discovery_status(0x%08x)",
1153 le32_to_cpu(event_data->DiscoveryStatus));
1157 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
1158 desc = "SAS Broadcast Primitive";
1160 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
1161 desc = "SAS Init Device Status Change";
1163 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
1164 desc = "SAS Init Table Overflow";
1166 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1167 desc = "SAS Topology Change List";
1169 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
1170 desc = "SAS Enclosure Device Status Change";
1172 case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
1176 case MPI2_EVENT_IR_PHYSICAL_DISK:
1177 if (!ioc->hide_ir_msg)
1178 desc = "IR Physical Disk";
1180 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
1181 if (!ioc->hide_ir_msg)
1182 desc = "IR Configuration Change List";
1184 case MPI2_EVENT_LOG_ENTRY_ADDED:
1185 if (!ioc->hide_ir_msg)
1186 desc = "Log Entry Added";
1188 case MPI2_EVENT_TEMP_THRESHOLD:
1189 desc = "Temperature Threshold";
1191 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
1192 desc = "Cable Event";
1194 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1195 desc = "SAS Device Discovery Error";
1197 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
1198 desc = "PCIE Device Status Change";
1200 case MPI2_EVENT_PCIE_ENUMERATION:
1202 Mpi26EventDataPCIeEnumeration_t *event_data =
1203 (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
1204 ioc_info(ioc, "PCIE Enumeration: (%s)",
		    event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
		    "start" : "stop");
1207 if (event_data->EnumerationStatus)
1208 pr_cont("enumeration_status(0x%08x)",
1209 le32_to_cpu(event_data->EnumerationStatus));
1213 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1214 desc = "PCIE Topology Change List";
	if (!desc)
		return;

	ioc_info(ioc, "%s\n", desc);
1225 * _base_sas_log_info - verbose translation of firmware log info
1226 * @ioc: per adapter object
1227 * @log_info: log info
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;
	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;
	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}
	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
		 log_info,
		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
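/*
 * Worked example (editor's note): decoding the nexus loss loginfo
 * 0x31170000 through the bitfields above gives bus_type = 0x3 (SAS),
 * originator = 0x1 ("PL"), code = 0x17 and sub_code = 0x0000; it would
 * print in exactly that form were it not filtered out earlier in this
 * function.
 */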
 * _base_display_reply_info - handle IOCStatus and LogInfo from a reply
1279 * @ioc: per adapter object
1280 * @smid: system request message index
1281 * @msix_index: MSIX table index supplied by the OS
1282 * @reply: reply message frame(lower 32bit addr)
1285 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;
1292 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1293 if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
1298 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
1300 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
1301 (ioc->logging_level & MPT_DEBUG_REPLY)) {
1302 _base_sas_ioc_info(ioc , mpi_reply,
1303 mpt3sas_base_get_msg_frame(ioc, smid));
1306 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
1307 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
1308 _base_sas_log_info(ioc, loginfo);
1311 if (ioc_status || loginfo) {
1312 ioc_status &= MPI2_IOCSTATUS_MASK;
1313 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1318 * mpt3sas_base_done - base internal command completion routine
1319 * @ioc: per adapter object
1320 * @smid: system request message index
1321 * @msix_index: MSIX table index supplied by the OS
1322 * @reply: reply message frame(lower 32bit addr)
1325 * 1 meaning mf should be freed from _base_interrupt
1326 * 0 means the mf is freed from this function.
1329 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1332 MPI2DefaultReply_t *mpi_reply;
1334 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1335 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
1336 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
1353 * _base_async_event - main callback handler for firmware asyn events
1354 * @ioc: per adapter object
1355 * @msix_index: MSIX table index supplied by the OS
1356 * @reply: reply message frame(lower 32bit addr)
1359 * 1 meaning mf should be freed from _base_interrupt
1360 * 0 means the mf is freed from this function.
1363 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1365 Mpi2EventNotificationReply_t *mpi_reply;
1366 Mpi2EventAckRequest_t *ack_request;
1368 struct _event_ack_list *delayed_event_ack;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;
1376 _base_display_event_data(ioc, mpi_reply);
	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
					    GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
			      &ioc->delayed_event_ack_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
				    le16_to_cpu(mpi_reply->Event)));
		goto out;
	}
1397 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1398 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1399 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1400 ack_request->Event = mpi_reply->Event;
1401 ack_request->EventContext = mpi_reply->EventContext;
1402 ack_request->VF_ID = 0; /* TODO */
1403 ack_request->VP_ID = 0;
1404 ioc->put_smid_default(ioc, smid);
 out:

	/* scsih callback handler */
1409 mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1411 /* ctl callback handler */
1412 mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1417 static struct scsiio_tracker *
1418 _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1420 struct scsi_cmnd *cmd;
1422 if (WARN_ON(!smid) ||
	    WARN_ON(smid >= ioc->hi_priority_smid))
		return NULL;

	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (cmd)
		return scsi_cmd_priv(cmd);

	return NULL;
1434 * _base_get_cb_idx - obtain the callback index
1435 * @ioc: per adapter object
1436 * @smid: system request message index
1438 * Return: callback index.
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;
1447 if (smid < ioc->hi_priority_smid) {
1448 struct scsiio_tracker *st;
1450 if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
1454 } else if (smid == ctl_smid)
1455 cb_idx = ioc->ctl_cb_idx;
1456 } else if (smid < ioc->internal_smid) {
1457 i = smid - ioc->hi_priority_smid;
1458 cb_idx = ioc->hpr_lookup[i].cb_idx;
1459 } else if (smid <= ioc->hba_queue_depth) {
1460 i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}

	return cb_idx;
}
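/*
 * Editor's note on the smid partition above: smids below ctl_smid resolve
 * through the per-command scsiio tracker; smid == ctl_smid is the driver's
 * internal ctl command; [hi_priority_smid, internal_smid) indexes
 * hpr_lookup[]; and [internal_smid, hba_queue_depth] indexes
 * internal_lookup[].
 */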
1467 * mpt3sas_base_mask_interrupts - disable interrupts
1468 * @ioc: per adapter object
1470 * Disabling ResetIRQ, Reply and Doorbell Interrupts
1473 mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1477 ioc->mask_interrupts = 1;
1478 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1479 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1480 writel(him_register, &ioc->chip->HostInterruptMask);
1481 ioc->base_readl(&ioc->chip->HostInterruptMask);
1485 * mpt3sas_base_unmask_interrupts - enable interrupts
1486 * @ioc: per adapter object
1488 * Enabling only Reply Interrupts
1491 mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1495 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1496 him_register &= ~MPI2_HIM_RIM;
1497 writel(him_register, &ioc->chip->HostInterruptMask);
1498 ioc->mask_interrupts = 0;
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
static u32 base_mod64(u64 dividend, u32 divisor)
{
	u32 remainder;

	if (!divisor)
		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
	remainder = do_div(dividend, divisor);
	return remainder;
}
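/*
 * Editor's note: do_div() divides the 64-bit dividend in place and
 * returns the 32-bit remainder, so base_mod64(10, 3) evaluates to 1.
 * The helper exists because a plain '%' on a u64 would require 64-bit
 * division support that 32-bit architectures do not provide natively.
 */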
1520 * _base_process_reply_queue - Process reply descriptors from reply
1521 * descriptor post queue.
1522 * @reply_q: per IRQ's reply queue object.
1524 * Return: number of reply descriptors processed from reply
1528 _base_process_reply_queue(struct adapter_reply_queue *reply_q)
1530 union reply_descriptor rd;
1532 u8 request_descript_type;
1536 u8 msix_index = reply_q->msix_index;
1537 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1538 Mpi2ReplyDescriptorsUnion_t *rpf;
1542 if (!atomic_add_unless(&reply_q->busy, 1, 1))
1543 return completed_cmds;
1545 rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1546 request_descript_type = rpf->Default.ReplyFlags
1547 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1548 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1549 atomic_dec(&reply_q->busy);
1550 return completed_cmds;
1555 rd.word = le64_to_cpu(rpf->Words);
1556 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1559 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1560 if (request_descript_type ==
1561 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1562 request_descript_type ==
1563 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1564 request_descript_type ==
1565 MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1566 cb_idx = _base_get_cb_idx(ioc, smid);
1567 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1568 (likely(mpt_callbacks[cb_idx] != NULL))) {
1569 rc = mpt_callbacks[cb_idx](ioc, smid,
1572 mpt3sas_base_free_smid(ioc, smid);
1574 } else if (request_descript_type ==
1575 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1576 reply = le32_to_cpu(
1577 rpf->AddressReply.ReplyFrameAddress);
1578 if (reply > ioc->reply_dma_max_address ||
1579 reply < ioc->reply_dma_min_address)
1582 cb_idx = _base_get_cb_idx(ioc, smid);
1583 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1584 (likely(mpt_callbacks[cb_idx] != NULL))) {
1585 rc = mpt_callbacks[cb_idx](ioc, smid,
1588 _base_display_reply_info(ioc,
1589 smid, msix_index, reply);
1591 mpt3sas_base_free_smid(ioc,
1595 _base_async_event(ioc, msix_index, reply);
1598 /* reply free queue handling */
1600 ioc->reply_free_host_index =
1601 (ioc->reply_free_host_index ==
1602 (ioc->reply_free_queue_depth - 1)) ?
1603 0 : ioc->reply_free_host_index + 1;
			ioc->reply_free[ioc->reply_free_host_index] =
			    cpu_to_le32(reply);
1606 if (ioc->is_mcpu_endpoint)
1607 _base_clone_reply_to_sys_mem(ioc,
1609 ioc->reply_free_host_index);
1610 writel(ioc->reply_free_host_index,
1611 &ioc->chip->ReplyFreeHostIndex);
1615 rpf->Words = cpu_to_le64(ULLONG_MAX);
1616 reply_q->reply_post_host_index =
1617 (reply_q->reply_post_host_index ==
1618 (ioc->reply_post_queue_depth - 1)) ? 0 :
1619 reply_q->reply_post_host_index + 1;
1620 request_descript_type =
1621 reply_q->reply_post_free[reply_q->reply_post_host_index].
1622 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1624 /* Update the reply post host index after continuously
1625 * processing the threshold number of Reply Descriptors.
1626 * So that FW can find enough entries to post the Reply
1627 * Descriptors in the reply descriptor post queue.
1629 if (completed_cmds >= ioc->thresh_hold) {
1630 if (ioc->combined_reply_queue) {
1631 writel(reply_q->reply_post_host_index |
1632 ((msix_index & 7) <<
1633 MPI2_RPHI_MSIX_INDEX_SHIFT),
1634 ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
				    (msix_index <<
				    MPI2_RPHI_MSIX_INDEX_SHIFT),
				    &ioc->chip->ReplyPostHostIndex);
			}
1641 if (!reply_q->irq_poll_scheduled) {
1642 reply_q->irq_poll_scheduled = true;
1643 irq_poll_sched(&reply_q->irqpoll);
1645 atomic_dec(&reply_q->busy);
1646 return completed_cmds;
1648 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1650 if (!reply_q->reply_post_host_index)
1651 rpf = reply_q->reply_post_free;
1658 if (!completed_cmds) {
1659 atomic_dec(&reply_q->busy);
1660 return completed_cmds;
1663 if (ioc->is_warpdrive) {
1664 writel(reply_q->reply_post_host_index,
1665 ioc->reply_post_host_index[msix_index]);
1666 atomic_dec(&reply_q->busy);
1667 return completed_cmds;
1670 /* Update Reply Post Host Index.
1671 * For those HBA's which support combined reply queue feature
1672 * 1. Get the correct Supplemental Reply Post Host Index Register.
1673 * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1674 * Index Register address bank i.e replyPostRegisterIndex[],
1675 * 2. Then update this register with new reply host index value
1676 * in ReplyPostIndex field and the MSIxIndex field with
1677 * msix_index value reduced to a value between 0 and 7,
1678 * using a modulo 8 operation. Since each Supplemental Reply Post
1679 * Host Index Register supports 8 MSI-X vectors.
1681 * For other HBA's just update the Reply Post Host Index register with
1682 * new reply host index value in ReplyPostIndex Field and msix_index
1683 * value in MSIxIndex field.
1685 if (ioc->combined_reply_queue)
1686 writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
1687 MPI2_RPHI_MSIX_INDEX_SHIFT),
1688 ioc->replyPostRegisterIndex[msix_index/8]);
1690 writel(reply_q->reply_post_host_index | (msix_index <<
1691 MPI2_RPHI_MSIX_INDEX_SHIFT),
1692 &ioc->chip->ReplyPostHostIndex);
1693 atomic_dec(&reply_q->busy);
1694 return completed_cmds;
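/*
 * Worked example (editor's note) for the combined reply queue math above:
 * msix_index = 13 selects supplemental register replyPostRegisterIndex[1]
 * (13 / 8 = 1) and writes 13 & 7 = 5 into the MSIxIndex field, since each
 * supplemental Reply Post Host Index register serves 8 MSI-X vectors.
 */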
1698 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1699 * @irq: irq number (not used)
1700 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
1702 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1705 _base_interrupt(int irq, void *bus_id)
1707 struct adapter_reply_queue *reply_q = bus_id;
1708 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	if (ioc->mask_interrupts)
		return IRQ_NONE;

	if (reply_q->irq_poll_scheduled)
		return IRQ_HANDLED;
1714 return ((_base_process_reply_queue(reply_q) > 0) ?
1715 IRQ_HANDLED : IRQ_NONE);
1719 * _base_irqpoll - IRQ poll callback handler
1720 * @irqpoll: irq_poll object
1721 * @budget: irq poll weight
 * Return: number of reply descriptors processed.
1726 _base_irqpoll(struct irq_poll *irqpoll, int budget)
1728 struct adapter_reply_queue *reply_q;
1729 int num_entries = 0;
	reply_q = container_of(irqpoll, struct adapter_reply_queue,
			irqpoll);
1733 if (reply_q->irq_line_enable) {
1734 disable_irq_nosync(reply_q->os_irq);
1735 reply_q->irq_line_enable = false;
1737 num_entries = _base_process_reply_queue(reply_q);
1738 if (num_entries < budget) {
1739 irq_poll_complete(irqpoll);
1740 reply_q->irq_poll_scheduled = false;
1741 reply_q->irq_line_enable = true;
1742 enable_irq(reply_q->os_irq);
 * _base_init_irqpolls - initialize IRQ polls
1750 * @ioc: per adapter object
1755 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1757 struct adapter_reply_queue *reply_q, *next;
	if (list_empty(&ioc->reply_queue_list))
		return;
1762 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1763 irq_poll_init(&reply_q->irqpoll,
1764 ioc->hba_queue_depth/4, _base_irqpoll);
1765 reply_q->irq_poll_scheduled = false;
1766 reply_q->irq_line_enable = true;
1767 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1768 reply_q->msix_index);
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1774 * @ioc: per adapter object
1776 * Return: Whether or not MSI/X is enabled.
1779 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1781 return (ioc->facts.IOCCapabilities &
1782 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1786 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1787 * @ioc: per adapter object
 * @poll: poll over reply descriptor pools in case the interrupt for a
 *	timed-out SCSI command got delayed
 * Context: non-ISR context
1792 * Called when a Task Management request has completed.
1795 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
1797 struct adapter_reply_queue *reply_q;
1799 /* If MSIX capability is turned off
1800 * then multi-queues are not enabled
	if (!_base_is_controller_msix_enabled(ioc))
		return;
1805 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
		    ioc->pci_error_recovery)
			return;

		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;
1812 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1813 if (reply_q->irq_poll_scheduled) {
1814 /* Calling irq_poll_disable will wait for any pending
1815 * callbacks to have completed.
1817 irq_poll_disable(&reply_q->irqpoll);
1818 irq_poll_enable(&reply_q->irqpoll);
1819 /* check how the scheduled poll has ended,
1820 * clean up only if necessary
1822 if (reply_q->irq_poll_scheduled) {
1823 reply_q->irq_poll_scheduled = false;
1824 reply_q->irq_line_enable = true;
1825 enable_irq(reply_q->os_irq);
		if (poll)
			_base_process_reply_queue(reply_q);
1834 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1835 * @cb_idx: callback index
1838 mpt3sas_base_release_callback_handler(u8 cb_idx)
1840 mpt_callbacks[cb_idx] = NULL;
1844 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1845 * @cb_func: callback function
1847 * Return: Index of @cb_func.
1850 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}
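/*
 * Usage sketch (editor's note, hedged): sub-modules obtain an index at
 * init time and release it on unload, along the lines of how the scsih
 * module uses this API:
 *
 *	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
 *	...
 *	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
 *
 * The identifiers above are illustrative of mpt3sas_scsih.c and are not
 * defined in this file.
 */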
1863 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1866 mpt3sas_base_initialize_callback_handler(void)
1870 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1871 mpt3sas_base_release_callback_handler(cb_idx);
1876 * _base_build_zero_len_sge - build zero length sg entry
1877 * @ioc: per adapter object
1878 * @paddr: virtual address for SGE
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1881 * something to use if the target device goes brain dead and tries
1882 * to send data even when none is asked for.
1885 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1887 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1888 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1889 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1890 MPI2_SGE_FLAGS_SHIFT);
1891 ioc->base_add_sg_single(paddr, flags_length, -1);
1895 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1896 * @paddr: virtual address for SGE
1897 * @flags_length: SGE flags and data transfer length
1898 * @dma_addr: Physical address
1901 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1903 Mpi2SGESimple32_t *sgel = paddr;
1905 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1906 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1907 sgel->FlagsLength = cpu_to_le32(flags_length);
1908 sgel->Address = cpu_to_le32(dma_addr);
1913 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1914 * @paddr: virtual address for SGE
1915 * @flags_length: SGE flags and data transfer length
1916 * @dma_addr: Physical address
1919 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1921 Mpi2SGESimple64_t *sgel = paddr;
1923 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1924 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1925 sgel->FlagsLength = cpu_to_le32(flags_length);
1926 sgel->Address = cpu_to_le64(dma_addr);
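/*
 * Editor's note on the FlagsLength packing used by both helpers above:
 * MPI2_SGE_FLAGS_SHIFT moves the SGE flags into the top byte, leaving
 * the lower 24 bits for the transfer length. A simple 64-bit SGE for a
 * 512 byte transfer therefore carries its flag bits in bits 31:24 and
 * 0x000200 in bits 23:0 of FlagsLength.
 */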
1930 * _base_get_chain_buffer_tracker - obtain chain tracker
1931 * @ioc: per adapter object
1932 * @scmd: SCSI commands of the IO request
1934 * Return: chain tracker from chain_lookup table using key as
1935 * smid and smid's chain_offset.
1937 static struct chain_tracker *
1938 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1939 struct scsi_cmnd *scmd)
1941 struct chain_tracker *chain_req;
1942 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1943 u16 smid = st->smid;
	u8 chain_offset =
	    atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

	if (chain_offset == ioc->chains_needed_per_io)
		return NULL;

	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
	return chain_req;
}
1957 * _base_build_sg - build generic sg
1958 * @ioc: per adapter object
1959 * @psge: virtual address for SGE
1960 * @data_out_dma: physical address for WRITES
1961 * @data_out_sz: data xfer size for WRITES
1962 * @data_in_dma: physical address for READS
1963 * @data_in_sz: data xfer size for READS
1966 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1967 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}
1977 if (data_out_sz && data_in_sz) {
1978 /* WRITE sgel first */
1979 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1980 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1981 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1982 ioc->base_add_sg_single(psge, sgl_flags |
1983 data_out_sz, data_out_dma);
1986 psge += ioc->sge_size;
1988 /* READ sgel last */
1989 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1990 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1991 MPI2_SGE_FLAGS_END_OF_LIST);
1992 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1993 ioc->base_add_sg_single(psge, sgl_flags |
1994 data_in_sz, data_in_dma);
1995 } else if (data_out_sz) /* WRITE */ {
1996 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1997 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1998 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1999 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2000 ioc->base_add_sg_single(psge, sgl_flags |
2001 data_out_sz, data_out_dma);
2002 } else if (data_in_sz) /* READ */ {
2003 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2004 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2005 MPI2_SGE_FLAGS_END_OF_LIST);
2006 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2007 ioc->base_add_sg_single(psge, sgl_flags |
2008 data_in_sz, data_in_dma);
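/*
 * Usage sketch (editor's note, hedged): for a bidirectional transfer such
 * as an SMP request/response pair, a caller would do something like
 *
 *	_base_build_sg(ioc, psge, req_dma, req_sz, resp_dma, resp_sz);
 *
 * which emits the WRITE (host-to-IOC) simple element first and the READ
 * element, tagged end-of-buffer/end-of-list, last. The variable names
 * here are illustrative only.
 */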
2012 /* IEEE format sgls */
2015 * _base_build_nvme_prp - This function is called for NVMe end devices to build
2016 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
2017 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
2018 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
2019 * used to describe a larger data buffer. If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
2021 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2022 * list located elsewhere in memory to describe the remaining data memory
2023 * segments. The PRP list will be contiguous.
2025 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
2027 * physical memory segments as a single memory buffer, just as a SGL does. Note
2028 * however, that this function is only used by the IOCTL call, so the memory
2029 * given will be guaranteed to be contiguous. There is no need to translate
2030 * non-contiguous SGL into a PRP in this case. All PRPs will describe
2031 * contiguous space that is one page size each.
2033 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
2034 * a PRP list pointer or a PRP element, depending upon the command. PRP2
2035 * contains the second PRP element if the memory being described fits within 2
2036 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2038 * A PRP list pointer contains the address of a PRP list, structured as a linear
2039 * array of PRP entries. Each PRP entry in this list describes a segment of
2042 * Each 64-bit PRP entry comprises an address and an offset field. The address
2043 * always points at the beginning of a 4KB physical memory page, and the offset
2044 * describes where within that 4KB page the memory segment begins. Only the
 * first element in a PRP list may contain a non-zero offset, implying that all
2046 * memory segments following the first begin at the start of a 4KB page.
2048 * Each PRP element normally describes 4KB of physical memory, with exceptions
2049 * for the first and last elements in the list. If the memory being described
2050 * by the list begins at a non-zero offset within the first 4KB page, then the
2051 * first PRP element will contain a non-zero offset indicating where the region
2052 * begins within the 4KB page. The last memory segment may end before the end
2053 * of the 4KB segment, depending upon the overall size of the memory being
2054 * described by the PRP list.
2056 * Since PRP entries lack any indication of size, the overall data buffer length
2057 * is used to determine where the end of the data memory buffer is located, and
2058 * how many PRP entries are required to describe it.
2060 * @ioc: per adapter object
2061 * @smid: system request message index for getting associated SGL
2062 * @nvme_encap_request: the NVMe request msg frame pointer
2063 * @data_out_dma: physical address for WRITES
2064 * @data_out_sz: data xfer size for WRITES
2065 * @data_in_dma: physical address for READS
2066 * @data_in_sz: data xfer size for READS
2069 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2070 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2071 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2074 int prp_size = NVME_PRP_SIZE;
2075 __le64 *prp_entry, *prp1_entry, *prp2_entry;
2077 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
2078 u32 offset, entry_len;
2079 u32 page_mask_result, page_mask;
2081 struct mpt3sas_nvme_cmd *nvme_cmd =
2082 (void *)nvme_encap_request->NVMe_Command;
2085 * Not all commands require a data transfer. If no data, just return
2086 * without constructing any PRP.
2088 if (!data_in_sz && !data_out_sz)
2090 prp1_entry = &nvme_cmd->prp1;
2091 prp2_entry = &nvme_cmd->prp2;
2092 prp_entry = prp1_entry;
2094 * For the PRP entries, use the specially allocated buffer of
2095 * contiguous memory.
2097 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2098 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2101 * Check if we are within 1 entry of a page boundary; we don't
2102 * want our first entry to be a PRP List entry.
2104 page_mask = ioc->page_size - 1;
2105 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2106 if (!page_mask_result) {
2107 /* Bump up to next page boundary. */
2108 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2109 prp_page_dma = prp_page_dma + prp_size;
2113 * Set PRP physical pointer, which initially points to the current PRP
2116 prp_entry_dma = prp_page_dma;
2118 /* Get physical address and length of the data buffer. */
2120 dma_addr = data_in_dma;
2121 length = data_in_sz;
2123 dma_addr = data_out_dma;
2124 length = data_out_sz;
2127 /* Loop while the length is not zero. */
2130 * Check if we need to put a list pointer here, i.e. if we are at
2131 * a page boundary minus prp_size (8 bytes).
2133 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2134 if (!page_mask_result) {
2136 * This is the last entry in a PRP List, so we need to
2137 * put a PRP list pointer here. What this does is:
2138 * - bump the current memory pointer to the next
2139 * address, which will be the next full page.
2140 * - set the PRP Entry to point to that page. This
2141 * is now the PRP List pointer.
2142 * - bump the PRP Entry pointer to the start of the
2143 * next page. Since all of this PRP memory is
2144 * contiguous, no need to get a new page - it's
2145 * just the next address.
2148 *prp_entry = cpu_to_le64(prp_entry_dma);
2152 /* An entry may cover only part of a page; compute that length. */
2153 offset = dma_addr & page_mask;
2154 entry_len = ioc->page_size - offset;
2156 if (prp_entry == prp1_entry) {
2158 * Must fill in the first PRP pointer (PRP1) before
2161 *prp1_entry = cpu_to_le64(dma_addr);
2164 * Now point to the second PRP entry within the
2167 prp_entry = prp2_entry;
2168 } else if (prp_entry == prp2_entry) {
2170 * Should the PRP2 entry be a PRP List pointer or just
2171 * a regular PRP pointer? If more than one more page of
2172 * data remains, a PRP List pointer must be used.
2174 if (length > ioc->page_size) {
2176 * PRP2 will contain a PRP List pointer because
2177 * more PRP's are needed with this command. The
2178 * list will start at the beginning of the
2179 * contiguous buffer.
2181 *prp2_entry = cpu_to_le64(prp_entry_dma);
2184 * The next PRP Entry will be the start of the
2187 prp_entry = prp_page;
2190 * After this, the PRP Entries are complete.
2191 * This command uses 2 PRP's and no PRP list.
2193 *prp2_entry = cpu_to_le64(dma_addr);
2197 * Put entry in list and bump the addresses.
2199 * After PRP1 and PRP2 are filled in, this will fill in
2200 * all remaining PRP entries in a PRP List, one per
2201 * each time through the loop.
2203 *prp_entry = cpu_to_le64(dma_addr);
2209 * Bump the phys address of the command's data buffer by the
2212 dma_addr += entry_len;
2214 /* Decrement length accounting for last partial page. */
2215 if (entry_len > length)
2218 length -= entry_len;
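/*
 * Illustrative sketch, not part of the driver: given the PRP rules
 * described above, the number of PRP entries a buffer needs follows
 * directly from its length and its offset into the first page. The
 * helper name below is hypothetical, for illustration only;
 * DIV_ROUND_UP() and min() are the usual kernel helpers.
 */
static inline u32 example_nvme_prp_entries_needed(dma_addr_t dma_addr,
	u32 length, u32 page_size)
{
	/* the first entry covers from the offset to the end of its page */
	u32 first_len = min(length,
		page_size - (u32)(dma_addr & (page_size - 1)));

	/* one entry for the (possibly partial) first page, one per page after */
	return 1 + DIV_ROUND_UP(length - first_len, page_size);
}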
2223 * base_make_prp_nvme - prepare PRPs (Physical Region Pages),
2224 * SGLs specific to NVMe drives only
2226 * @ioc: per adapter object
2227 * @scmd: SCSI command from the mid-layer
2228 * @mpi_request: mpi request
2230 * @sge_count: scatter gather element count.
2232 * Return: true: PRPs are built
2233 * false: IEEE SGLs needs to be built
2236 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2237 struct scsi_cmnd *scmd,
2238 Mpi25SCSIIORequest_t *mpi_request,
2239 u16 smid, int sge_count)
2241 int sge_len, num_prp_in_chain = 0;
2242 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2244 dma_addr_t msg_dma, sge_addr, offset;
2245 u32 page_mask, page_mask_result;
2246 struct scatterlist *sg_scmd;
2248 int data_len = scsi_bufflen(scmd);
2251 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2253 * NVMe has a very convoluted PRP format. One PRP is required
2254 * for each page or partial page. The driver needs to split up OS sg_list
2255 * entries if they are longer than one page or cross a page
2256 * boundary. The driver also has to insert a PRP list pointer entry as
2257 * the last entry in each physical page of the PRP list.
2259 * NOTE: The first PRP "entry" is actually placed in the first
2260 * SGL entry in the main message as IEEE 64 format. The 2nd
2261 * entry in the main message is the chain element, and the rest
2262 * of the PRP entries are built in the contiguous pcie buffer.
2264 page_mask = nvme_pg_size - 1;
2267 * Native SGL is needed.
2268 * Put a chain element in main message frame that points to the first
2271 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
2275 /* Set main message chain element pointer */
2276 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2278 * For NVMe the chain element needs to be the 2nd SG entry in the main
2281 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2282 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2285 * For the PRP entries, use the specially allocated buffer of
2286 * contiguous memory. Normal chain buffers can't be used
2287 * because each chain buffer would need to be the size of an OS
2290 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2291 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2293 main_chain_element->Address = cpu_to_le64(msg_dma);
2294 main_chain_element->NextChainOffset = 0;
2295 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2296 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2297 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2299 /* Build first prp; sge need not be page aligned */
2300 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2301 sg_scmd = scsi_sglist(scmd);
2302 sge_addr = sg_dma_address(sg_scmd);
2303 sge_len = sg_dma_len(sg_scmd);
2305 offset = sge_addr & page_mask;
2306 first_prp_len = nvme_pg_size - offset;
2308 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2309 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2311 data_len -= first_prp_len;
2313 if (sge_len > first_prp_len) {
2314 sge_addr += first_prp_len;
2315 sge_len -= first_prp_len;
2316 } else if (data_len && (sge_len == first_prp_len)) {
2317 sg_scmd = sg_next(sg_scmd);
2318 sge_addr = sg_dma_address(sg_scmd);
2319 sge_len = sg_dma_len(sg_scmd);
2323 offset = sge_addr & page_mask;
2325 /* Put a PRP list pointer where we hit a page boundary */
2326 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2327 if (unlikely(!page_mask_result)) {
2328 scmd_printk(KERN_NOTICE,
2329 scmd, "page boundary curr_buff: 0x%p\n",
2332 *curr_buff = cpu_to_le64(msg_dma);
2337 *curr_buff = cpu_to_le64(sge_addr);
2342 sge_addr += nvme_pg_size;
2343 sge_len -= nvme_pg_size;
2344 data_len -= nvme_pg_size;
2352 sg_scmd = sg_next(sg_scmd);
2353 sge_addr = sg_dma_address(sg_scmd);
2354 sge_len = sg_dma_len(sg_scmd);
2357 main_chain_element->Length =
2358 cpu_to_le32(num_prp_in_chain * sizeof(u64));
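/*
 * Worked example (illustrative values): with nvme_pg_size = 4096 and a
 * first SG entry at sge_addr = 0x10001200, offset = 0x200 and
 * first_prp_len = 4096 - 0x200 = 3584 bytes, so the first PRP "entry"
 * placed in the main message covers only the remainder of that page;
 * every subsequent PRP entry then starts page aligned.
 */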
2363 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2364 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2366 u32 data_length = 0;
2367 bool build_prp = true;
2369 data_length = scsi_bufflen(scmd);
2371 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2376 /* If data length is <= 16K and the number of SG entries is <= 2,
* build an IEEE SGL instead of PRPs.
*/
2379 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
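/*
 * Worked example: with NVME_PRP_PAGE_SIZE = 4096 the threshold above
 * is 4096 * 4 = 16K, so a transfer of up to 16K described by at most
 * two SG entries stays on the IEEE SGL path, while larger or more
 * fragmented transfers get native PRPs. (Assumes the usual 4K PRP
 * page size.)
 */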
2386 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2387 * determine if the driver needs to build a native SGL. If so, that native
2388 * SGL is built in the special contiguous buffers allocated especially for
2389 * PCIe SGL creation. If the driver will not build a native SGL, return
2390 * TRUE and a normal IEEE SGL will be built. Currently this routine supports NVMe.
2392 * @ioc: per adapter object
2393 * @mpi_request: mf request pointer
2394 * @smid: system request message index
2395 * @scmd: scsi command
2396 * @pcie_device: points to the PCIe device's info
2398 * Return: 0 if native SGL was built, 1 if no SGL was built
2401 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2402 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2403 struct _pcie_device *pcie_device)
2407 /* Get the SG list pointer and info. */
2408 sges_left = scsi_dma_map(scmd);
2409 if (sges_left < 0) {
2410 sdev_printk(KERN_ERR, scmd->device,
2411 "scsi_dma_map failed: request for %d bytes!\n",
2412 scsi_bufflen(scmd));
2416 /* Check if we need to build a native SG list. */
2417 if (base_is_prp_possible(ioc, pcie_device,
2418 scmd, sges_left) == 0) {
2419 /* We built a native SG list, just return. */
2424 * Build native NVMe PRP.
2426 base_make_prp_nvme(ioc, scmd, mpi_request,
2431 scsi_dma_unmap(scmd);
2436 * _base_add_sg_single_ieee - add sg element for IEEE format
2437 * @paddr: virtual address for SGE
2439 * @chain_offset: number of 128 byte elements from start of segment
2440 * @length: data transfer length
2441 * @dma_addr: Physical address
2444 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2445 dma_addr_t dma_addr)
2447 Mpi25IeeeSgeChain64_t *sgel = paddr;
2449 sgel->Flags = flags;
2450 sgel->NextChainOffset = chain_offset;
2451 sgel->Length = cpu_to_le32(length);
2452 sgel->Address = cpu_to_le64(dma_addr);
2456 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2457 * @ioc: per adapter object
2458 * @paddr: virtual address for SGE
2460 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2461 * something to use if the target device goes brain dead and tries
2462 * to send data even when none is asked for.
2465 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2467 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2468 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2469 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2471 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2475 * _base_build_sg_scmd - main sg creation routine
2476 * pcie_device is unused here!
2477 * @ioc: per adapter object
2478 * @scmd: scsi command
2479 * @smid: system request message index
2480 * @unused: unused pcie_device pointer
2483 * The main routine that builds scatter gather table from a given
2484 * scsi request sent via the .queuecommand main handler.
2486 * Return: 0 success, anything else error
2489 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2490 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2492 Mpi2SCSIIORequest_t *mpi_request;
2493 dma_addr_t chain_dma;
2494 struct scatterlist *sg_scmd;
2495 void *sg_local, *chain;
2500 u32 sges_in_segment;
2502 u32 sgl_flags_last_element;
2503 u32 sgl_flags_end_buffer;
2504 struct chain_tracker *chain_req;
2506 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2508 /* init scatter gather flags */
2509 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2510 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2511 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2512 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2513 << MPI2_SGE_FLAGS_SHIFT;
2514 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2515 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2516 << MPI2_SGE_FLAGS_SHIFT;
2517 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2519 sg_scmd = scsi_sglist(scmd);
2520 sges_left = scsi_dma_map(scmd);
2521 if (sges_left < 0) {
2522 sdev_printk(KERN_ERR, scmd->device,
2523 "scsi_dma_map failed: request for %d bytes!\n",
2524 scsi_bufflen(scmd));
2528 sg_local = &mpi_request->SGL;
2529 sges_in_segment = ioc->max_sges_in_main_message;
2530 if (sges_left <= sges_in_segment)
2531 goto fill_in_last_segment;
2533 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2534 (sges_in_segment * ioc->sge_size))/4;
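	/*
	 * Illustration (hypothetical sizes): ChainOffset is counted in
	 * 32-bit words. If the SGL starts 96 bytes into the request
	 * frame and the main segment holds two 16-byte SGEs, the chain
	 * element lands at (96 + 2 * 16) / 4 = 32 DWORDs.
	 */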
2536 /* fill in main message segment when there is a chain following */
2537 while (sges_in_segment) {
2538 if (sges_in_segment == 1)
2539 ioc->base_add_sg_single(sg_local,
2540 sgl_flags_last_element | sg_dma_len(sg_scmd),
2541 sg_dma_address(sg_scmd));
2543 ioc->base_add_sg_single(sg_local, sgl_flags |
2544 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2545 sg_scmd = sg_next(sg_scmd);
2546 sg_local += ioc->sge_size;
2551 /* initializing the chain flags and pointers */
2552 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2553 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2556 chain = chain_req->chain_buffer;
2557 chain_dma = chain_req->chain_buffer_dma;
2559 sges_in_segment = (sges_left <=
2560 ioc->max_sges_in_chain_message) ? sges_left :
2561 ioc->max_sges_in_chain_message;
2562 chain_offset = (sges_left == sges_in_segment) ?
2563 0 : (sges_in_segment * ioc->sge_size)/4;
2564 chain_length = sges_in_segment * ioc->sge_size;
2566 chain_offset = chain_offset <<
2567 MPI2_SGE_CHAIN_OFFSET_SHIFT;
2568 chain_length += ioc->sge_size;
2570 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2571 chain_length, chain_dma);
2574 goto fill_in_last_segment;
2576 /* fill in chain segments */
2577 while (sges_in_segment) {
2578 if (sges_in_segment == 1)
2579 ioc->base_add_sg_single(sg_local,
2580 sgl_flags_last_element |
2581 sg_dma_len(sg_scmd),
2582 sg_dma_address(sg_scmd));
2584 ioc->base_add_sg_single(sg_local, sgl_flags |
2585 sg_dma_len(sg_scmd),
2586 sg_dma_address(sg_scmd));
2587 sg_scmd = sg_next(sg_scmd);
2588 sg_local += ioc->sge_size;
2593 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2596 chain = chain_req->chain_buffer;
2597 chain_dma = chain_req->chain_buffer_dma;
2601 fill_in_last_segment:
2603 /* fill the last segment */
2606 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2607 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2609 ioc->base_add_sg_single(sg_local, sgl_flags |
2610 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2611 sg_scmd = sg_next(sg_scmd);
2612 sg_local += ioc->sge_size;
2620 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2621 * @ioc: per adapter object
2622 * @scmd: scsi command
2623 * @smid: system request message index
2624 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2625 * constructed when needed.
2628 * The main routine that builds scatter gather table from a given
2629 * scsi request sent via the .queuecommand main handler.
2631 * Return: 0 success, anything else error
2634 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2635 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2637 Mpi25SCSIIORequest_t *mpi_request;
2638 dma_addr_t chain_dma;
2639 struct scatterlist *sg_scmd;
2640 void *sg_local, *chain;
2644 u32 sges_in_segment;
2645 u8 simple_sgl_flags;
2646 u8 simple_sgl_flags_last;
2648 struct chain_tracker *chain_req;
2650 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2652 /* init scatter gather flags */
2653 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2654 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2655 simple_sgl_flags_last = simple_sgl_flags |
2656 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2657 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2658 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2660 /* Check if we need to build a native SG list. */
2661 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2662 smid, scmd, pcie_device) == 0)) {
2663 /* We built a native SG list, just return. */
2667 sg_scmd = scsi_sglist(scmd);
2668 sges_left = scsi_dma_map(scmd);
2669 if (sges_left < 0) {
2670 sdev_printk(KERN_ERR, scmd->device,
2671 "scsi_dma_map failed: request for %d bytes!\n",
2672 scsi_bufflen(scmd));
2676 sg_local = &mpi_request->SGL;
2677 sges_in_segment = (ioc->request_sz -
2678 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2679 if (sges_left <= sges_in_segment)
2680 goto fill_in_last_segment;
2682 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2683 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
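	/*
	 * Illustration (hypothetical sizes): unlike the MPI2 format
	 * above, the IEEE ChainOffset is counted in 16-byte IEEE SGE
	 * units, not DWORDs. With the SGL 96 bytes into the request
	 * frame, that base contributes 96 / 16 = 6 units, plus one for
	 * each simple SGE placed ahead of the chain element.
	 */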
2685 /* fill in main message segment when there is a chain following */
2686 while (sges_in_segment > 1) {
2687 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2688 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2689 sg_scmd = sg_next(sg_scmd);
2690 sg_local += ioc->sge_size_ieee;
2695 /* initializing the pointers */
2696 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2699 chain = chain_req->chain_buffer;
2700 chain_dma = chain_req->chain_buffer_dma;
2702 sges_in_segment = (sges_left <=
2703 ioc->max_sges_in_chain_message) ? sges_left :
2704 ioc->max_sges_in_chain_message;
2705 chain_offset = (sges_left == sges_in_segment) ?
2706 0 : sges_in_segment;
2707 chain_length = sges_in_segment * ioc->sge_size_ieee;
2709 chain_length += ioc->sge_size_ieee;
2710 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2711 chain_offset, chain_length, chain_dma);
2715 goto fill_in_last_segment;
2717 /* fill in chain segments */
2718 while (sges_in_segment) {
2719 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2720 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2721 sg_scmd = sg_next(sg_scmd);
2722 sg_local += ioc->sge_size_ieee;
2727 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2730 chain = chain_req->chain_buffer;
2731 chain_dma = chain_req->chain_buffer_dma;
2735 fill_in_last_segment:
2737 /* fill the last segment */
2738 while (sges_left > 0) {
2740 _base_add_sg_single_ieee(sg_local,
2741 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2742 sg_dma_address(sg_scmd));
2744 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2745 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2746 sg_scmd = sg_next(sg_scmd);
2747 sg_local += ioc->sge_size_ieee;
2755 * _base_build_sg_ieee - build generic sg for IEEE format
2756 * @ioc: per adapter object
2757 * @psge: virtual address for SGE
2758 * @data_out_dma: physical address for WRITES
2759 * @data_out_sz: data xfer size for WRITES
2760 * @data_in_dma: physical address for READS
2761 * @data_in_sz: data xfer size for READS
2764 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2765 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2770 if (!data_out_sz && !data_in_sz) {
2771 _base_build_zero_len_sge_ieee(ioc, psge);
2775 if (data_out_sz && data_in_sz) {
2776 /* WRITE sgel first */
2777 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2778 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2779 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2783 psge += ioc->sge_size_ieee;
2785 /* READ sgel last */
2786 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2787 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2789 } else if (data_out_sz) /* WRITE */ {
2790 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2791 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2792 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2793 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2795 } else if (data_in_sz) /* READ */ {
2796 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2797 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2798 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2799 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2804 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
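/* Worked example: with 4 KiB pages (PAGE_SHIFT = 12), convert_to_kb(x)
 * is x << 2, i.e. each page counts as 4 kB.
 */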
2807 * _base_config_dma_addressing - set dma addressing
2808 * @ioc: per adapter object
2809 * @pdev: PCI device struct
2811 * Return: 0 for success, non-zero for failure.
2814 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2819 if (ioc->is_mcpu_endpoint ||
2820 sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
2821 dma_get_required_mask(&pdev->dev) <= 32)
2823 /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2824 else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
2829 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
2830 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
2833 if (dma_mask > 32) {
2834 ioc->base_add_sg_single = &_base_add_sg_single_64;
2835 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2837 ioc->base_add_sg_single = &_base_add_sg_single_32;
2838 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2842 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2843 dma_mask, convert_to_kb(s.totalram));
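/*
 * Minimal usage sketch, illustration only (the function name is
 * hypothetical): the common idiom behind the mask selection above is
 * to try the widest mask first and fall back to 32 bit. The driver
 * itself deliberately caps SAS3 and newer parts at a 63 bit mask.
 */
static int example_pick_dma_mask(struct pci_dev *pdev)
{
	/* prefer full 64-bit DMA when the platform supports it */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 64;
	/* otherwise fall back to 32-bit addressing */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return 32;
	return -ENODEV;
}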
2849 * _base_check_enable_msix - checks MSI-X capability.
2850 * @ioc: per adapter object
2852 * Check to see if card is capable of MSIX, and set number
2853 * of available msix vectors
2856 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2859 u16 message_control;
2861 /* Check whether the controller is a SAS2008 B0; if so,
2862 * use IO-APIC instead of MSI-X.
2864 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2865 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2869 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2871 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2875 /* get msix vector count */
2876 /* NUMA_IO not supported for older controllers */
2877 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2878 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2879 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2880 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2881 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2882 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2883 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2884 ioc->msix_vector_count = 1;
2886 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2887 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2889 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2890 ioc->msix_vector_count));
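/*
 * Worked example: the MSI-X Message Control word encodes (table size
 * - 1), so message_control = 0x000F above yields
 * (0x000F & 0x3FF) + 1 = 16 vectors.
 */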
2895 * _base_free_irq - free irq
2896 * @ioc: per adapter object
2898 * Free each reply_queue from the list.
2901 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2903 struct adapter_reply_queue *reply_q, *next;
2905 if (list_empty(&ioc->reply_queue_list))
2908 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2909 list_del(&reply_q->list);
2910 if (ioc->smp_affinity_enable)
2911 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2912 reply_q->msix_index), NULL);
2913 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2920 * _base_request_irq - request irq
2921 * @ioc: per adapter object
2922 * @index: msix index into vector table
2924 * Insert the respective reply_queue into the list.
2927 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2929 struct pci_dev *pdev = ioc->pdev;
2930 struct adapter_reply_queue *reply_q;
2933 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2935 ioc_err(ioc, "unable to allocate memory %zu!\n",
2936 sizeof(struct adapter_reply_queue));
2940 reply_q->msix_index = index;
2942 atomic_set(&reply_q->busy, 0);
2943 if (ioc->msix_enable)
2944 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2945 ioc->driver_name, ioc->id, index);
2947 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2948 ioc->driver_name, ioc->id);
2949 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2950 IRQF_SHARED, reply_q->name, reply_q);
2952 pr_err("%s: unable to allocate interrupt %d!\n",
2953 reply_q->name, pci_irq_vector(pdev, index));
2958 INIT_LIST_HEAD(&reply_q->list);
2959 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2964 * _base_assign_reply_queues - assigning msix index for each cpu
2965 * @ioc: per adapter object
2967 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
2969 * It would be nice if we could call irq_set_affinity; however, it is not
2970 * an exported symbol.
2973 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2975 unsigned int cpu, nr_cpus, nr_msix, index = 0;
2976 struct adapter_reply_queue *reply_q;
2977 int local_numa_node;
2979 if (!_base_is_controller_msix_enabled(ioc))
2982 if (ioc->msix_load_balance)
2985 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2987 nr_cpus = num_online_cpus();
2988 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2989 ioc->facts.MaxMSIxVectors);
2993 if (ioc->smp_affinity_enable) {
2996 * set irq affinity to local numa node for those irqs
2997 * corresponding to high iops queues.
2999 if (ioc->high_iops_queues) {
3000 local_numa_node = dev_to_node(&ioc->pdev->dev);
3001 for (index = 0; index < ioc->high_iops_queues;
3003 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
3004 index), cpumask_of_node(local_numa_node));
3008 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3009 const cpumask_t *mask;
3011 if (reply_q->msix_index < ioc->high_iops_queues)
3014 mask = pci_irq_get_affinity(ioc->pdev,
3015 reply_q->msix_index);
3017 ioc_warn(ioc, "no affinity for msi %x\n",
3018 reply_q->msix_index);
3022 for_each_cpu_and(cpu, mask, cpu_online_mask) {
3023 if (cpu >= ioc->cpu_msix_table_sz)
3025 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3032 cpu = cpumask_first(cpu_online_mask);
3033 nr_msix -= ioc->high_iops_queues;
3036 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3037 unsigned int i, group = nr_cpus / nr_msix;
3039 if (reply_q->msix_index < ioc->high_iops_queues)
3045 if (index < nr_cpus % nr_msix)
3048 for (i = 0 ; i < group ; i++) {
3049 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3050 cpu = cpumask_next(cpu, cpu_online_mask);
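/*
 * Worked example: with nr_cpus = 8 and nr_msix = 3 the loop above
 * assigns groups of 8 / 3 = 2 CPUs per reply queue, and the first
 * 8 % 3 = 2 queues each take one extra CPU, giving a 3/3/2 split.
 */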
3057 * _base_check_and_enable_high_iops_queues - enable high iops mode
3058 * @ioc: per adapter object
3059 * @hba_msix_vector_count: msix vectors supported by HBA
3061 * Enable high iops queues only if
3062 * - HBA is a SEA/AERO controller and
3063 * - MSI-X vectors supported by the HBA is 128 and
3064 * - total CPU count in the system >=16 and
3065 * - loaded driver with default max_msix_vectors module parameter and
3066 * - system booted in non kdump mode
3071 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3072 int hba_msix_vector_count)
3076 if (perf_mode == MPT_PERF_MODE_IOPS ||
3077 perf_mode == MPT_PERF_MODE_LATENCY) {
3078 ioc->high_iops_queues = 0;
3082 if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3084 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3085 speed = lnksta & PCI_EXP_LNKSTA_CLS;
3088 ioc->high_iops_queues = 0;
3093 if (!reset_devices && ioc->is_aero_ioc &&
3094 hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3095 num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3096 max_msix_vectors == -1)
3097 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3099 ioc->high_iops_queues = 0;
3103 * _base_disable_msix - disables msix
3104 * @ioc: per adapter object
3108 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3110 if (!ioc->msix_enable)
3112 pci_free_irq_vectors(ioc->pdev);
3113 ioc->msix_enable = 0;
3117 * _base_alloc_irq_vectors - allocate msix vectors
3118 * @ioc: per adapter object
3122 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3124 int i, irq_flags = PCI_IRQ_MSIX;
3125 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3126 struct irq_affinity *descp = &desc;
3128 if (ioc->smp_affinity_enable)
3129 irq_flags |= PCI_IRQ_AFFINITY;
3133 ioc_info(ioc, "high_iops_queues: %d, reply_queue_count: %d\n",
3134 ioc->high_iops_queues, ioc->reply_queue_count);
3136 i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3137 ioc->high_iops_queues,
3138 ioc->reply_queue_count, irq_flags, descp);
3144 * _base_enable_msix - enables msix, fall back to io_apic
3145 * @ioc: per adapter object
3149 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3152 int i, local_max_msix_vectors;
3155 ioc->msix_load_balance = false;
3157 if (msix_disable == -1 || msix_disable == 0)
3163 if (_base_check_enable_msix(ioc) != 0)
3166 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3167 pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3168 ioc->cpu_count, max_msix_vectors);
3169 if (ioc->is_aero_ioc)
3170 _base_check_and_enable_high_iops_queues(ioc,
3171 ioc->msix_vector_count);
3172 ioc->reply_queue_count =
3173 min_t(int, ioc->cpu_count + ioc->high_iops_queues,
3174 ioc->msix_vector_count);
3176 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3177 local_max_msix_vectors = (reset_devices) ? 1 : 8;
3179 local_max_msix_vectors = max_msix_vectors;
3181 if (local_max_msix_vectors > 0)
3182 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3183 ioc->reply_queue_count);
3184 else if (local_max_msix_vectors == 0)
3188 * Enable msix_load_balance only if combined reply queue mode is
3189 * disabled on SAS3 & above generation HBA devices.
3191 if (!ioc->combined_reply_queue &&
3192 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3194 "combined ReplyQueue is off, Enabling msix load balance\n");
3195 ioc->msix_load_balance = true;
3199 * SMP affinity setting is not needed when msix load balance is enabled.
3202 if (ioc->msix_load_balance)
3203 ioc->smp_affinity_enable = 0;
3205 r = _base_alloc_irq_vectors(ioc);
3207 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3211 ioc->msix_enable = 1;
3212 ioc->reply_queue_count = r;
3213 for (i = 0; i < ioc->reply_queue_count; i++) {
3214 r = _base_request_irq(ioc, i);
3216 _base_free_irq(ioc);
3217 _base_disable_msix(ioc);
3222 ioc_info(ioc, "High IOPs queues : %s\n",
3223 ioc->high_iops_queues ? "enabled" : "disabled");
3227 /* fall back to io_apic interrupt routing */
3229 ioc->high_iops_queues = 0;
3230 ioc_info(ioc, "High IOPs queues : disabled\n");
3231 ioc->reply_queue_count = 1;
3232 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3235 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3238 r = _base_request_irq(ioc, 0);
3244 * mpt3sas_base_unmap_resources - free controller resources
3245 * @ioc: per adapter object
3248 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3250 struct pci_dev *pdev = ioc->pdev;
3252 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3254 _base_free_irq(ioc);
3255 _base_disable_msix(ioc);
3257 kfree(ioc->replyPostRegisterIndex);
3258 ioc->replyPostRegisterIndex = NULL;
3261 if (ioc->chip_phys) {
3266 if (pci_is_enabled(pdev)) {
3267 pci_release_selected_regions(ioc->pdev, ioc->bars);
3268 pci_disable_pcie_error_reporting(pdev);
3269 pci_disable_device(pdev);
3274 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3277 * _base_check_for_fault_and_issue_reset - check if IOC is in fault state,
3278 * and if so, issue a diag reset.
3279 * @ioc: per adapter object
3281 * Returns: 0 for success, non-zero for failure.
3284 _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3289 dinitprintk(ioc, pr_info("%s\n", __func__));
3290 if (ioc->pci_error_recovery)
3292 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3293 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3295 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3296 mpt3sas_print_fault_code(ioc, ioc_state &
3297 MPI2_DOORBELL_DATA_MASK);
3298 rc = _base_diag_reset(ioc);
3299 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3300 MPI2_IOC_STATE_COREDUMP) {
3301 mpt3sas_print_coredump_info(ioc, ioc_state &
3302 MPI2_DOORBELL_DATA_MASK);
3303 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3304 rc = _base_diag_reset(ioc);
3311 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3312 * @ioc: per adapter object
3314 * Return: 0 for success, non-zero for failure.
3317 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3319 struct pci_dev *pdev = ioc->pdev;
3324 phys_addr_t chip_phys = 0;
3325 struct adapter_reply_queue *reply_q;
3327 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3329 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3330 if (pci_enable_device_mem(pdev)) {
3331 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3337 if (pci_request_selected_regions(pdev, ioc->bars,
3338 ioc->driver_name)) {
3339 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3345 /* AER (Advanced Error Reporting) hooks */
3346 pci_enable_pcie_error_reporting(pdev);
3348 pci_set_master(pdev);
3351 if (_base_config_dma_addressing(ioc, pdev) != 0) {
3352 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3357 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3358 (!memap_sz || !pio_sz); i++) {
3359 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3362 pio_chip = (u64)pci_resource_start(pdev, i);
3363 pio_sz = pci_resource_len(pdev, i);
3364 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3367 ioc->chip_phys = pci_resource_start(pdev, i);
3368 chip_phys = ioc->chip_phys;
3369 memap_sz = pci_resource_len(pdev, i);
3370 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3374 if (ioc->chip == NULL) {
3376 "unable to map adapter memory! or resource not found\n");
3381 mpt3sas_base_mask_interrupts(ioc);
3383 r = _base_get_ioc_facts(ioc);
3385 rc = _base_check_for_fault_and_issue_reset(ioc);
3386 if (rc || (_base_get_ioc_facts(ioc)))
3390 if (!ioc->rdpq_array_enable_assigned) {
3391 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3392 ioc->rdpq_array_enable_assigned = 1;
3395 r = _base_enable_msix(ioc);
3399 if (!ioc->is_driver_loading)
3400 _base_init_irqpolls(ioc);
3401 /* Use the Combined reply queue feature only for SAS3 C0 & higher
3402 * revision HBAs and also only when reply queue count is greater than 8
3404 if (ioc->combined_reply_queue) {
3405 /* Determine the Supplemental Reply Post Host Index Registers'
3406 * Address. Supplemental Reply Post Host Index Registers
3407 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and
3408 * each register is offset by
3409 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the previous one.
3411 ioc->replyPostRegisterIndex = kcalloc(
3412 ioc->combined_reply_index_count,
3413 sizeof(resource_size_t *), GFP_KERNEL);
3414 if (!ioc->replyPostRegisterIndex) {
3416 "allocation for replyPostRegisterIndex failed!\n");
3421 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3422 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3423 ((u8 __force *)&ioc->chip->Doorbell +
3424 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3425 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3429 if (ioc->is_warpdrive) {
3430 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3431 &ioc->chip->ReplyPostHostIndex;
3433 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3434 ioc->reply_post_host_index[i] =
3435 (resource_size_t __iomem *)
3436 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3440 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3441 pr_info("%s: %s enabled: IRQ %d\n",
3443 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3444 pci_irq_vector(ioc->pdev, reply_q->msix_index));
3446 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3447 &chip_phys, ioc->chip, memap_sz);
3448 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3449 (unsigned long long)pio_chip, pio_sz);
3451 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3452 pci_save_state(pdev);
3456 mpt3sas_base_unmap_resources(ioc);
3461 * mpt3sas_base_get_msg_frame - obtain request mf pointer
3462 * @ioc: per adapter object
3463 * @smid: system request message index(smid zero is invalid)
3465 * Return: virt pointer to message frame.
3468 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3470 return (void *)(ioc->request + (smid * ioc->request_sz));
3474 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3475 * @ioc: per adapter object
3476 * @smid: system request message index
3478 * Return: virt pointer to sense buffer.
3481 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3483 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3487 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3488 * @ioc: per adapter object
3489 * @smid: system request message index
3491 * Return: phys pointer to the low 32bit address of the sense buffer.
3494 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3496 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3497 SCSI_SENSE_BUFFERSIZE));
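/*
 * Worked example: sense buffers are laid out back to back, so for
 * smid = 4 the helpers above return ioc->sense (or ioc->sense_dma)
 * plus 3 * SCSI_SENSE_BUFFERSIZE bytes.
 */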
3501 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3502 * @ioc: per adapter object
3503 * @smid: system request message index
3505 * Return: virt pointer to a PCIe SGL.
3508 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3510 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3514 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3515 * @ioc: per adapter object
3516 * @smid: system request message index
3518 * Return: phys pointer to the address of the PCIe buffer.
3521 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3523 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3527 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3528 * @ioc: per adapter object
3529 * @phys_addr: lower 32 physical addr of the reply
3531 * Converts 32bit lower physical addr into a virt address.
3534 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3538 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3542 * _base_get_msix_index - get the msix index
3543 * @ioc: per adapter object
3544 * @scmd: scsi_cmnd object
3546 * returns msix index of general reply queues,
3547 * i.e. reply queue on which IO request's reply
3548 * should be posted by the HBA firmware.
3551 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3552 struct scsi_cmnd *scmd)
3554 /* Enables reply_queue load balancing */
3555 if (ioc->msix_load_balance)
3556 return ioc->reply_queue_count ?
3557 base_mod64(atomic64_add_return(1,
3558 &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3560 return ioc->cpu_msix_table[raw_smp_processor_id()];
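/*
 * Worked example: with msix_load_balance enabled and 8 reply queues,
 * the IO whose counter value is 17 posts to queue
 * base_mod64(17, 8) = 1; without load balancing, the index is simply
 * the queue bound to the submitting CPU.
 */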
3564 * _base_sdev_nr_inflight_request - get number of inflight requests
3565 * of a request queue.
3566 * @q: request_queue object
3568 * returns number of inflight request of a request queue.
3570 inline unsigned long
3571 _base_sdev_nr_inflight_request(struct request_queue *q)
3573 struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
3575 return atomic_read(&hctx->nr_active);
3580 * _base_get_high_iops_msix_index - get the msix index of
3582 * @ioc: per adapter object
3583 * @scmd: scsi_cmnd object
3585 * Returns: msix index of high iops reply queues.
3586 * i.e. high iops reply queue on which IO request's
3587 * reply should be posted by the HBA firmware.
3590 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3591 struct scsi_cmnd *scmd)
3594 * Round robin the IO interrupts among the high iops
3595 * reply queues in batches of 16 when the number of outstanding
3596 * IOs on the target device is >= 8.
3598 if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
3599 MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3601 atomic64_add_return(1, &ioc->high_iops_outstanding) /
3602 MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3603 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3605 return _base_get_msix_index(ioc, scmd);
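/*
 * Worked example: with a batch count of 16 and 8 high iops queues, a
 * busy device whose counter reads 35 posts to queue
 * base_mod64(35 / 16, 8) = 2; counter values 32 through 47 all land
 * on queue 2 before the round robin advances to queue 3.
 */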
3609 * mpt3sas_base_get_smid - obtain a free smid from internal queue
3610 * @ioc: per adapter object
3611 * @cb_idx: callback index
3613 * Return: smid (zero is invalid)
3616 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3618 unsigned long flags;
3619 struct request_tracker *request;
3622 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3623 if (list_empty(&ioc->internal_free_list)) {
3624 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3625 ioc_err(ioc, "%s: smid not available\n", __func__);
3629 request = list_entry(ioc->internal_free_list.next,
3630 struct request_tracker, tracker_list);
3631 request->cb_idx = cb_idx;
3632 smid = request->smid;
3633 list_del(&request->tracker_list);
3634 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3639 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3640 * @ioc: per adapter object
3641 * @cb_idx: callback index
3642 * @scmd: pointer to scsi command object
3644 * Return: smid (zero is invalid)
3647 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3648 struct scsi_cmnd *scmd)
3650 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3651 unsigned int tag = scmd->request->tag;
3655 request->cb_idx = cb_idx;
3656 request->smid = smid;
3657 request->scmd = scmd;
3658 INIT_LIST_HEAD(&request->chain_list);
3663 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3664 * @ioc: per adapter object
3665 * @cb_idx: callback index
3667 * Return: smid (zero is invalid)
3670 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3672 unsigned long flags;
3673 struct request_tracker *request;
3676 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3677 if (list_empty(&ioc->hpr_free_list)) {
3678 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3682 request = list_entry(ioc->hpr_free_list.next,
3683 struct request_tracker, tracker_list);
3684 request->cb_idx = cb_idx;
3685 smid = request->smid;
3686 list_del(&request->tracker_list);
3687 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3692 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3695 * See _wait_for_commands_to_complete() call with regard to this code.
3697 if (ioc->shost_recovery && ioc->pending_io_count) {
3698 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3699 if (ioc->pending_io_count == 0)
3700 wake_up(&ioc->reset_wq);
3704 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3705 struct scsiio_tracker *st)
3707 if (WARN_ON(st->smid == 0))
3712 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3717 * mpt3sas_base_free_smid - put smid back on free_list
3718 * @ioc: per adapter object
3719 * @smid: system request message index
3722 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3724 unsigned long flags;
3727 if (smid < ioc->hi_priority_smid) {
3728 struct scsiio_tracker *st;
3731 st = _get_st_from_smid(ioc, smid);
3733 _base_recovery_check(ioc);
3737 /* Clear MPI request frame */
3738 request = mpt3sas_base_get_msg_frame(ioc, smid);
3739 memset(request, 0, ioc->request_sz);
3741 mpt3sas_base_clear_st(ioc, st);
3742 _base_recovery_check(ioc);
3746 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3747 if (smid < ioc->internal_smid) {
3749 i = smid - ioc->hi_priority_smid;
3750 ioc->hpr_lookup[i].cb_idx = 0xFF;
3751 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3752 } else if (smid <= ioc->hba_queue_depth) {
3753 /* internal queue */
3754 i = smid - ioc->internal_smid;
3755 ioc->internal_lookup[i].cb_idx = 0xFF;
3756 list_add(&ioc->internal_lookup[i].tracker_list,
3757 &ioc->internal_free_list);
3759 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3763 * _base_mpi_ep_writeq - 32 bit write to MMIO
3765 * @addr: address in MMIO space
3766 * @writeq_lock: spin lock
3768 * This is special handling for the MPI EP, to take care of 32 bit
3769 * environments where sending the entire word in one shot is not guaranteed.
3773 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3774 spinlock_t *writeq_lock)
3776 unsigned long flags;
3778 spin_lock_irqsave(writeq_lock, flags);
3779 __raw_writel((u32)(b), addr);
3780 __raw_writel((u32)(b >> 32), (addr + 4));
3781 spin_unlock_irqrestore(writeq_lock, flags);
3785 * _base_writeq - 64 bit write to MMIO
3787 * @addr: address in MMIO space
3788 * @writeq_lock: spin lock
3790 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
3791 * care of 32 bit environments where sending the entire word in one shot is not guaranteed.
3794 #if defined(writeq) && defined(CONFIG_64BIT)
3796 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3799 __raw_writeq(b, addr);
3804 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3806 _base_mpi_ep_writeq(b, addr, writeq_lock);
3811 * _base_set_and_get_msix_index - get the msix index and assign to msix_io
3812 * variable of scsi tracker
3813 * @ioc: per adapter object
3814 * @smid: system request message index
3816 * returns msix index.
3819 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3821 struct scsiio_tracker *st = NULL;
3823 if (smid < ioc->hi_priority_smid)
3824 st = _get_st_from_smid(ioc, smid);
3827 return _base_get_msix_index(ioc, NULL);
3829 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3834 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3835 * @ioc: per adapter object
3836 * @smid: system request message index
3837 * @handle: device handle
3840 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
3841 u16 smid, u16 handle)
3843 Mpi2RequestDescriptorUnion_t descriptor;
3844 u64 *request = (u64 *)&descriptor;
3845 void *mpi_req_iomem;
3846 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3848 _clone_sg_entries(ioc, (void *) mfp, smid);
3849 mpi_req_iomem = (void __force *)ioc->chip +
3850 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3851 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3853 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3854 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3855 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3856 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3857 descriptor.SCSIIO.LMID = 0;
3858 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3859 &ioc->scsi_lookup_lock);
3863 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3864 * @ioc: per adapter object
3865 * @smid: system request message index
3866 * @handle: device handle
3869 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3871 Mpi2RequestDescriptorUnion_t descriptor;
3872 u64 *request = (u64 *)&descriptor;
3875 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3876 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3877 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3878 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3879 descriptor.SCSIIO.LMID = 0;
3880 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3881 &ioc->scsi_lookup_lock);
3885 * _base_put_smid_fast_path - send fast path request to firmware
3886 * @ioc: per adapter object
3887 * @smid: system request message index
3888 * @handle: device handle
3891 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3894 Mpi2RequestDescriptorUnion_t descriptor;
3895 u64 *request = (u64 *)&descriptor;
3897 descriptor.SCSIIO.RequestFlags =
3898 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3899 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3900 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3901 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3902 descriptor.SCSIIO.LMID = 0;
3903 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3904 &ioc->scsi_lookup_lock);
3908 * _base_put_smid_hi_priority - send Task Management request to firmware
3909 * @ioc: per adapter object
3910 * @smid: system request message index
3911 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0.
3914 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3917 Mpi2RequestDescriptorUnion_t descriptor;
3918 void *mpi_req_iomem;
3921 if (ioc->is_mcpu_endpoint) {
3922 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3924 /* TBD 256 is offset within sys register. */
3925 mpi_req_iomem = (void __force *)ioc->chip
3926 + MPI_FRAME_START_OFFSET
3927 + (smid * ioc->request_sz);
3928 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3932 request = (u64 *)&descriptor;
3934 descriptor.HighPriority.RequestFlags =
3935 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3936 descriptor.HighPriority.MSIxIndex = msix_task;
3937 descriptor.HighPriority.SMID = cpu_to_le16(smid);
3938 descriptor.HighPriority.LMID = 0;
3939 descriptor.HighPriority.Reserved1 = 0;
3940 if (ioc->is_mcpu_endpoint)
3941 _base_mpi_ep_writeq(*request,
3942 &ioc->chip->RequestDescriptorPostLow,
3943 &ioc->scsi_lookup_lock);
3945 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3946 &ioc->scsi_lookup_lock);
3950 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
3952 * @ioc: per adapter object
3953 * @smid: system request message index
3956 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3958 Mpi2RequestDescriptorUnion_t descriptor;
3959 u64 *request = (u64 *)&descriptor;
3961 descriptor.Default.RequestFlags =
3962 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3963 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3964 descriptor.Default.SMID = cpu_to_le16(smid);
3965 descriptor.Default.LMID = 0;
3966 descriptor.Default.DescriptorTypeDependent = 0;
3967 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3968 &ioc->scsi_lookup_lock);
3972 * _base_put_smid_default - Default, primarily used for config pages
3973 * @ioc: per adapter object
3974 * @smid: system request message index
3977 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3979 Mpi2RequestDescriptorUnion_t descriptor;
3980 void *mpi_req_iomem;
3983 if (ioc->is_mcpu_endpoint) {
3984 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3986 _clone_sg_entries(ioc, (void *) mfp, smid);
3987 /* TBD 256 is offset within sys register */
3988 mpi_req_iomem = (void __force *)ioc->chip +
3989 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3990 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3993 request = (u64 *)&descriptor;
3994 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3995 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3996 descriptor.Default.SMID = cpu_to_le16(smid);
3997 descriptor.Default.LMID = 0;
3998 descriptor.Default.DescriptorTypeDependent = 0;
3999 if (ioc->is_mcpu_endpoint)
4000 _base_mpi_ep_writeq(*request,
4001 &ioc->chip->RequestDescriptorPostLow,
4002 &ioc->scsi_lookup_lock);
4004 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4005 &ioc->scsi_lookup_lock);
4009 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4010 * Atomic Request Descriptor
4011 * @ioc: per adapter object
4012 * @smid: system request message index
4013 * @handle: device handle, unused in this function, for function type match
4018 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4021 Mpi26AtomicRequestDescriptor_t descriptor;
4022 u32 *request = (u32 *)&descriptor;
4024 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4025 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4026 descriptor.SMID = cpu_to_le16(smid);
4028 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4032 * _base_put_smid_fast_path_atomic - send fast path request to firmware
4033 * using Atomic Request Descriptor
4034 * @ioc: per adapter object
4035 * @smid: system request message index
4036 * @handle: device handle, unused in this function, for function type match
4040 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4043 Mpi26AtomicRequestDescriptor_t descriptor;
4044 u32 *request = (u32 *)&descriptor;
4046 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4047 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4048 descriptor.SMID = cpu_to_le16(smid);
4050 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4054 * _base_put_smid_hi_priority_atomic - send Task Management request to
4055 * firmware using Atomic Request Descriptor
4056 * @ioc: per adapter object
4057 * @smid: system request message index
4058 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
4063 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4066 Mpi26AtomicRequestDescriptor_t descriptor;
4067 u32 *request = (u32 *)&descriptor;
4069 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4070 descriptor.MSIxIndex = msix_task;
4071 descriptor.SMID = cpu_to_le16(smid);
4073 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4077 * _base_put_smid_default_atomic - Default, primarily used for config pages;
4078 * uses Atomic Request Descriptor
4079 * @ioc: per adapter object
4080 * @smid: system request message index
4085 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4087 Mpi26AtomicRequestDescriptor_t descriptor;
4088 u32 *request = (u32 *)&descriptor;
4090 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4091 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4092 descriptor.SMID = cpu_to_le16(smid);
4094 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
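/*
 * Note the contrast with the non-atomic helpers above: an Atomic
 * Request Descriptor is only 32 bits wide, so a single writel() is
 * inherently atomic and none of the writeq()/spinlock glue used for
 * the 64-bit descriptors is needed.
 */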
4098 * _base_display_OEMs_branding - Display branding string
4099 * @ioc: per adapter object
4102 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
/* The switch below also brands Dell, Cisco and HP (3PAR) parts, so
 * don't return early for those subsystem vendors.
 */
4104 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL &&
    ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL &&
    ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_CISCO &&
    ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
    return;
4107 switch (ioc->pdev->subsystem_vendor) {
4108 case PCI_VENDOR_ID_INTEL:
4109 switch (ioc->pdev->device) {
4110 case MPI2_MFGPAGE_DEVID_SAS2008:
4111 switch (ioc->pdev->subsystem_device) {
4112 case MPT2SAS_INTEL_RMS2LL080_SSDID:
4113 ioc_info(ioc, "%s\n",
4114 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4116 case MPT2SAS_INTEL_RMS2LL040_SSDID:
4117 ioc_info(ioc, "%s\n",
4118 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4120 case MPT2SAS_INTEL_SSD910_SSDID:
4121 ioc_info(ioc, "%s\n",
4122 MPT2SAS_INTEL_SSD910_BRANDING);
4125 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4126 ioc->pdev->subsystem_device);
4130 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4131 switch (ioc->pdev->subsystem_device) {
4132 case MPT2SAS_INTEL_RS25GB008_SSDID:
4133 ioc_info(ioc, "%s\n",
4134 MPT2SAS_INTEL_RS25GB008_BRANDING);
4136 case MPT2SAS_INTEL_RMS25JB080_SSDID:
4137 ioc_info(ioc, "%s\n",
4138 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4140 case MPT2SAS_INTEL_RMS25JB040_SSDID:
4141 ioc_info(ioc, "%s\n",
4142 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4144 case MPT2SAS_INTEL_RMS25KB080_SSDID:
4145 ioc_info(ioc, "%s\n",
4146 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4148 case MPT2SAS_INTEL_RMS25KB040_SSDID:
4149 ioc_info(ioc, "%s\n",
4150 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4152 case MPT2SAS_INTEL_RMS25LB040_SSDID:
4153 ioc_info(ioc, "%s\n",
4154 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4156 case MPT2SAS_INTEL_RMS25LB080_SSDID:
4157 ioc_info(ioc, "%s\n",
4158 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4161 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4162 ioc->pdev->subsystem_device);
4166 case MPI25_MFGPAGE_DEVID_SAS3008:
4167 switch (ioc->pdev->subsystem_device) {
4168 case MPT3SAS_INTEL_RMS3JC080_SSDID:
4169 ioc_info(ioc, "%s\n",
4170 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4173 case MPT3SAS_INTEL_RS3GC008_SSDID:
4174 ioc_info(ioc, "%s\n",
4175 MPT3SAS_INTEL_RS3GC008_BRANDING);
4177 case MPT3SAS_INTEL_RS3FC044_SSDID:
4178 ioc_info(ioc, "%s\n",
4179 MPT3SAS_INTEL_RS3FC044_BRANDING);
4181 case MPT3SAS_INTEL_RS3UC080_SSDID:
4182 ioc_info(ioc, "%s\n",
4183 MPT3SAS_INTEL_RS3UC080_BRANDING);
4186 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4187 ioc->pdev->subsystem_device);
4192 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4193 ioc->pdev->subsystem_device);
4197 case PCI_VENDOR_ID_DELL:
4198 switch (ioc->pdev->device) {
4199 case MPI2_MFGPAGE_DEVID_SAS2008:
4200 switch (ioc->pdev->subsystem_device) {
4201 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4202 ioc_info(ioc, "%s\n",
4203 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4205 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4206 ioc_info(ioc, "%s\n",
4207 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4209 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4210 ioc_info(ioc, "%s\n",
4211 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4213 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4214 ioc_info(ioc, "%s\n",
4215 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4217 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4218 ioc_info(ioc, "%s\n",
4219 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4221 case MPT2SAS_DELL_PERC_H200_SSDID:
4222 ioc_info(ioc, "%s\n",
4223 MPT2SAS_DELL_PERC_H200_BRANDING);
4225 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4226 ioc_info(ioc, "%s\n",
4227 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4230 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4231 ioc->pdev->subsystem_device);
4235 case MPI25_MFGPAGE_DEVID_SAS3008:
4236 switch (ioc->pdev->subsystem_device) {
4237 case MPT3SAS_DELL_12G_HBA_SSDID:
4238 ioc_info(ioc, "%s\n",
4239 MPT3SAS_DELL_12G_HBA_BRANDING);
4242 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4243 ioc->pdev->subsystem_device);
4248 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4249 ioc->pdev->subsystem_device);
4253 case PCI_VENDOR_ID_CISCO:
4254 switch (ioc->pdev->device) {
4255 case MPI25_MFGPAGE_DEVID_SAS3008:
4256 switch (ioc->pdev->subsystem_device) {
4257 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4258 ioc_info(ioc, "%s\n",
4259 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4261 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4262 ioc_info(ioc, "%s\n",
4263 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4265 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4266 ioc_info(ioc, "%s\n",
4267 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4270 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4271 ioc->pdev->subsystem_device);
4275 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4276 switch (ioc->pdev->subsystem_device) {
4277 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4278 ioc_info(ioc, "%s\n",
4279 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4281 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4282 ioc_info(ioc, "%s\n",
4283 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4286 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4287 ioc->pdev->subsystem_device);
4292 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4293 ioc->pdev->subsystem_device);
4297 case MPT2SAS_HP_3PAR_SSVID:
4298 switch (ioc->pdev->device) {
4299 case MPI2_MFGPAGE_DEVID_SAS2004:
4300 switch (ioc->pdev->subsystem_device) {
4301 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4302 ioc_info(ioc, "%s\n",
4303 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4306 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4307 ioc->pdev->subsystem_device);
4311 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4312 switch (ioc->pdev->subsystem_device) {
4313 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4314 ioc_info(ioc, "%s\n",
4315 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4317 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4318 ioc_info(ioc, "%s\n",
4319 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4321 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4322 ioc_info(ioc, "%s\n",
4323 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4325 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4326 ioc_info(ioc, "%s\n",
4327 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4330 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4331 ioc->pdev->subsystem_device);
4336 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4337 ioc->pdev->subsystem_device);
4346 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4347 * version from FW Image Header.
4348 * @ioc: per adapter object
4350 * Return: 0 for success, non-zero for failure.
4353 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4355 Mpi2FWImageHeader_t *fw_img_hdr;
4356 Mpi26ComponentImageHeader_t *cmp_img_hdr;
4357 Mpi25FWUploadRequest_t *mpi_request;
4358 Mpi2FWUploadReply_t mpi_reply;
4360 u32 package_version = 0;
4361 void *fwpkg_data = NULL;
4362 dma_addr_t fwpkg_data_dma;
4363 u16 smid, ioc_status;
4366 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4368 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4369 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4373 data_length = sizeof(Mpi2FWImageHeader_t);
4374 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4375 &fwpkg_data_dma, GFP_KERNEL);
4378 "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4379 __FILE__, __LINE__, __func__);
4383 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4385 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4390 ioc->base_cmds.status = MPT3_CMD_PENDING;
4391 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4392 ioc->base_cmds.smid = smid;
4393 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4394 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4395 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4396 mpi_request->ImageSize = cpu_to_le32(data_length);
4397 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4399 init_completion(&ioc->base_cmds.done);
4400 ioc->put_smid_default(ioc, smid);
4401 /* Wait for 15 seconds */
4402 wait_for_completion_timeout(&ioc->base_cmds.done,
4403 FW_IMG_HDR_READ_TIMEOUT*HZ);
4404 ioc_info(ioc, "%s: complete\n", __func__);
4405 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4406 ioc_err(ioc, "%s: timeout\n", __func__);
4407 _debug_dump_mf(mpi_request,
4408 sizeof(Mpi25FWUploadRequest_t)/4);
4411 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4412 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4413 memcpy(&mpi_reply, ioc->base_cmds.reply,
4414 sizeof(Mpi2FWUploadReply_t));
4415 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4416 MPI2_IOCSTATUS_MASK;
4417 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4418 fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4419 if (le32_to_cpu(fw_img_hdr->Signature) ==
4420 MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4422 (Mpi26ComponentImageHeader_t *)
4426 cmp_img_hdr->ApplicationSpecific);
4430 fw_img_hdr->PackageVersion.Word);
4431 if (package_version)
4433 "FW Package Ver(%02d.%02d.%02d.%02d)\n",
4434 ((package_version) & 0xFF000000) >> 24,
4435 ((package_version) & 0x00FF0000) >> 16,
4436 ((package_version) & 0x0000FF00) >> 8,
4437 (package_version) & 0x000000FF);
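/*
 * A worked example of the unpacking above: a package_version of
 * 0x0e000a00 prints as "FW Package Ver(14.00.10.00)".
 */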
4439 _debug_dump_mf(&mpi_reply,
4440 sizeof(Mpi2FWUploadReply_t)/4);
4444 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4447 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4453 * _base_display_ioc_capabilities - Display IOC's capabilities.
4454 * @ioc: per adapter object
4457 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4461 u32 iounit_pg1_flags;
4464 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4465 strncpy(desc, ioc->manu_pg0.ChipName, 16);
4466 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4468 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4469 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4470 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4471 ioc->facts.FWVersion.Word & 0x000000FF,
4472 ioc->pdev->revision,
4473 (bios_version & 0xFF000000) >> 24,
4474 (bios_version & 0x00FF0000) >> 16,
4475 (bios_version & 0x0000FF00) >> 8,
4476 bios_version & 0x000000FF);
4478 _base_display_OEMs_branding(ioc);
4480 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4481 pr_info("%sNVMe", i ? "," : "");
4485 ioc_info(ioc, "Protocol=(");
4487 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4488 pr_cont("Initiator");
4492 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4493 pr_cont("%sTarget", i ? "," : "");
4498 pr_cont("), Capabilities=(");
4500 if (!ioc->hide_ir_msg) {
4501 if (ioc->facts.IOCCapabilities &
4502 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4508 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4509 pr_cont("%sTLR", i ? "," : "");
4513 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4514 pr_cont("%sMulticast", i ? "," : "");
4518 if (ioc->facts.IOCCapabilities &
4519 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4520 pr_cont("%sBIDI Target", i ? "," : "");
4524 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4525 pr_cont("%sEEDP", i ? "," : "");
4529 if (ioc->facts.IOCCapabilities &
4530 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4531 pr_cont("%sSnapshot Buffer", i ? "," : "");
4535 if (ioc->facts.IOCCapabilities &
4536 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4537 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4541 if (ioc->facts.IOCCapabilities &
4542 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4543 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4547 if (ioc->facts.IOCCapabilities &
4548 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4549 pr_cont("%sTask Set Full", i ? "," : "");
4553 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4554 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4555 pr_cont("%sNCQ", i ? "," : "");
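/*
 * The prints above build up a single log line; for example, an
 * initiator-only HBA with EEDP might log:
 * "Protocol=(Initiator), Capabilities=(EEDP,Task Set Full,NCQ)".
 */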
4563 * mpt3sas_base_update_missing_delay - change the missing delay timers
4564 * @ioc: per adapter object
4565 * @device_missing_delay: amount of time till device is reported missing
4566 * @io_missing_delay: interval IO is returned when there is a missing device
4568 * Passed on the command line, this function will modify the device missing
4569 * delay, as well as the io missing delay. This should be called at driver load time.
4573 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4574 u16 device_missing_delay, u8 io_missing_delay)
4576 u16 dmd, dmd_new, dmd_original;
4577 u8 io_missing_delay_original;
4579 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4580 Mpi2ConfigReply_t mpi_reply;
4584 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4588 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4589 sizeof(Mpi2SasIOUnit1PhyData_t));
4590 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4591 if (!sas_iounit_pg1) {
4592 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4593 __FILE__, __LINE__, __func__);
4596 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4597 sas_iounit_pg1, sz))) {
4598 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4599 __FILE__, __LINE__, __func__);
4602 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4603 MPI2_IOCSTATUS_MASK;
4604 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4605 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4606 __FILE__, __LINE__, __func__);
4610 /* device missing delay */
4611 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4612 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4613 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4615 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
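/*
 * A sketch of the decode above, assuming UNIT_16 is bit 7 and the
 * timeout mask is 0x7F (per the MPI2 headers): a stored value of 0x92
 * has UNIT_16 set with a count of 0x12, so it decodes to 18 * 16 = 288
 * seconds, while 0x32 decodes directly to 50 seconds.
 */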
4617 if (device_missing_delay > 0x7F) {
4618 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4619 device_missing_delay;
4621 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4623 dmd = device_missing_delay;
4624 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4626 /* io missing delay */
4627 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4628 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4630 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4632 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4634 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4637 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4638 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4639 dmd_original, dmd_new);
4640 ioc_info(ioc, "io_missing_delay: old(%d), new(%d)\n",
4641 io_missing_delay_original,
4643 ioc->device_missing_delay = dmd_new;
4644 ioc->io_missing_delay = io_missing_delay;
4648 kfree(sas_iounit_pg1);
4652 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4653 * according to performance mode.
4654 * @ioc : per adapter object
4659 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4661 Mpi2IOCPage1_t ioc_pg1;
4662 Mpi2ConfigReply_t mpi_reply;
4664 mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4665 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4667 switch (perf_mode) {
4668 case MPT_PERF_MODE_DEFAULT:
4669 case MPT_PERF_MODE_BALANCED:
4670 if (ioc->high_iops_queues) {
4672 "Enable interrupt coalescing only for first %d reply queues\n",
4674 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4676 * If the 31st bit is zero then interrupt coalescing is
4677 * enabled for all reply descriptor post queues.
4678 * If the 31st bit is set to one then the user can
4679 * enable/disable interrupt coalescing on a per reply
4680 * descriptor post queue group (of 8) basis. So to enable
4681 * interrupt coalescing only on the first reply descriptor
4682 * post queue group, the 31st bit and the zeroth bit are set.
4684 ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4685 ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
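/*
 * A quick sketch of the value computed above, assuming
 * MPT3SAS_HIGH_IOPS_REPLY_QUEUES is 8: bit 31 selects per-group
 * control and ((1 << 8/8) - 1) == 0x1 enables coalescing on group 0
 * only, so ProductSpecific becomes 0x80000001.
 */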
4686 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4687 ioc_info(ioc, "performance mode: balanced\n");
4691 case MPT_PERF_MODE_LATENCY:
4693 * Enable interrupt coalescing on all reply queues
4694 * with timeout value 0xA
4696 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4697 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4698 ioc_pg1.ProductSpecific = 0;
4699 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4700 ioc_info(ioc, "performance mode: latency\n");
4702 case MPT_PERF_MODE_IOPS:
4704 * Enable interrupt coalescing on all reply queues.
4707 "performance mode: iops with coalescing timeout: 0x%x\n",
4708 le32_to_cpu(ioc_pg1.CoalescingTimeout));
4709 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4710 ioc_pg1.ProductSpecific = 0;
4711 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4717 * _base_static_config_pages - static start of day config pages
4718 * @ioc: per adapter object
4721 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4723 Mpi2ConfigReply_t mpi_reply;
4724 u32 iounit_pg1_flags;
4726 ioc->nvme_abort_timeout = 30;
4727 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4728 if (ioc->ir_firmware)
4729 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4733 * Ensure correct T10 PI operation if vendor left EEDPTagMode
4734 * flag unset in NVDATA.
4736 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4737 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
4738 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4740 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4741 ioc->manu_pg11.EEDPTagMode |= 0x1;
4742 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4745 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4746 ioc->tm_custom_handling = 1;
4748 ioc->tm_custom_handling = 0;
4749 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4750 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4751 else if (ioc->manu_pg11.NVMeAbortTO >
4752 NVME_TASK_ABORT_MAX_TIMEOUT)
4753 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4755 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4758 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4759 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4760 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4761 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4762 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4763 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4764 _base_display_ioc_capabilities(ioc);
4767 * Enable task_set_full handling in iounit_pg1 when the
4768 * facts capabilities indicate that it is supported.
4770 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4771 if ((ioc->facts.IOCCapabilities &
4772 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4774 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4777 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4778 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4779 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4781 if (ioc->iounit_pg8.NumSensors)
4782 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4783 if (ioc->is_aero_ioc)
4784 _base_update_ioc_page1_inlinewith_perf_mode(ioc);
4788 * mpt3sas_free_enclosure_list - release memory
4789 * @ioc: per adapter object
4791 * Free memory allocated during enclosure add.
4794 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4796 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4798 /* Free enclosure list */
4799 list_for_each_entry_safe(enclosure_dev,
4800 enclosure_dev_next, &ioc->enclosure_list, list) {
4801 list_del(&enclosure_dev->list);
4802 kfree(enclosure_dev);
4807 * _base_release_memory_pools - release memory
4808 * @ioc: per adapter object
4810 * Free memory allocated from _base_allocate_memory_pools.
4813 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4817 int dma_alloc_count = 0;
4818 struct chain_tracker *ct;
4819 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4821 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4824 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
4825 ioc->request, ioc->request_dma);
4827 ioc_info(ioc, "request_pool(0x%p): free\n",
4829 ioc->request = NULL;
4833 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4834 dma_pool_destroy(ioc->sense_dma_pool);
4836 ioc_info(ioc, "sense_pool(0x%p): free\n",
4842 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4843 dma_pool_destroy(ioc->reply_dma_pool);
4845 ioc_info(ioc, "reply_pool(0x%p): free\n",
4850 if (ioc->reply_free) {
4851 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4852 ioc->reply_free_dma);
4853 dma_pool_destroy(ioc->reply_free_dma_pool);
4855 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4857 ioc->reply_free = NULL;
4860 if (ioc->reply_post) {
4861 dma_alloc_count = DIV_ROUND_UP(count,
4862 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4863 for (i = 0; i < count; i++) {
4864 if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
4865 && dma_alloc_count) {
4866 if (ioc->reply_post[i].reply_post_free) {
4868 ioc->reply_post_free_dma_pool,
4869 ioc->reply_post[i].reply_post_free,
4870 ioc->reply_post[i].reply_post_free_dma);
4871 dexitprintk(ioc, ioc_info(ioc,
4872 "reply_post_free_pool(0x%p): free\n",
4873 ioc->reply_post[i].reply_post_free));
4874 ioc->reply_post[i].reply_post_free =
4880 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4881 if (ioc->reply_post_free_array &&
4882 ioc->rdpq_array_enable) {
4883 dma_pool_free(ioc->reply_post_free_array_dma_pool,
4884 ioc->reply_post_free_array,
4885 ioc->reply_post_free_array_dma);
4886 ioc->reply_post_free_array = NULL;
4888 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4889 kfree(ioc->reply_post);
4892 if (ioc->pcie_sgl_dma_pool) {
4893 for (i = 0; i < ioc->scsiio_depth; i++) {
4894 dma_pool_free(ioc->pcie_sgl_dma_pool,
4895 ioc->pcie_sg_lookup[i].pcie_sgl,
4896 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4898 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4901 if (ioc->config_page) {
4903 ioc_info(ioc, "config_page(0x%p): free\n",
4905 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
4906 ioc->config_page, ioc->config_page_dma);
4909 kfree(ioc->hpr_lookup);
4910 ioc->hpr_lookup = NULL;
4911 kfree(ioc->internal_lookup);
4912 ioc->internal_lookup = NULL;
4913 if (ioc->chain_lookup) {
4914 for (i = 0; i < ioc->scsiio_depth; i++) {
4915 for (j = ioc->chains_per_prp_buffer;
4916 j < ioc->chains_needed_per_io; j++) {
4917 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4918 if (ct && ct->chain_buffer)
4919 dma_pool_free(ioc->chain_dma_pool,
4921 ct->chain_buffer_dma);
4923 kfree(ioc->chain_lookup[i].chains_per_smid);
4925 dma_pool_destroy(ioc->chain_dma_pool);
4926 kfree(ioc->chain_lookup);
4927 ioc->chain_lookup = NULL;
4932 * mpt3sas_check_same_4gb_region - check whether all reply queues in a set
4933 * have the same upper 32 bits in their base memory address.
4934 * @reply_pool_start_address: Base address of a reply queue set
4935 * @pool_sz: Size of single Reply Descriptor Post Queues pool size
4937 * Return: 1 if the reply queues in a set have the same upper 32 bits in
4938 * their base memory address, else 0.
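 *
 * For example, a pool starting at address 0x1fffff000 with size 0x2000
 * ends at 0x200001000; upper_32_bits() yields 0x1 vs 0x2, so the check
 * fails and the caller must reallocate.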
4942 mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
4944 long reply_pool_end_address;
4946 reply_pool_end_address = reply_pool_start_address + pool_sz;
4948 if (upper_32_bits(reply_pool_start_address) ==
4949 upper_32_bits(reply_pool_end_address))
4956 * base_alloc_rdpq_dma_pool - allocate DMA'able memory for the reply queues
4958 * @ioc: per adapter object
4959 * @sz: DMA Pool size
4960 * Return: 0 for success, non-zero for failure.
4963 base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
4966 u32 dma_alloc_count = 0;
4967 int reply_post_free_sz = ioc->reply_post_queue_depth *
4968 sizeof(Mpi2DefaultReplyDescriptor_t);
4969 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4971 ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
4973 if (!ioc->reply_post)
4976 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ...) and for
4977 * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ...) must lie
4978 * within a 4GB boundary, i.e. the reply queues in a set must have the
4979 * same upper 32 bits in their memory address. So here the driver
4980 * allocates the DMA'able memory for the reply queues accordingly.
4981 * The driver applies the stricter limitation of
4982 * VENTURA_SERIES to INVADER_SERIES as well.
4984 dma_alloc_count = DIV_ROUND_UP(count,
4985 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4986 ioc->reply_post_free_dma_pool =
4987 dma_pool_create("reply_post_free pool",
4988 &ioc->pdev->dev, sz, 16, 0);
4989 if (!ioc->reply_post_free_dma_pool)
4991 for (i = 0; i < count; i++) {
4992 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
4993 ioc->reply_post[i].reply_post_free =
4994 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
4996 &ioc->reply_post[i].reply_post_free_dma);
4997 if (!ioc->reply_post[i].reply_post_free)
5000 * Each set of the RDPQ pool must satisfy the 4GB boundary restriction:
5002 * 1) Check if the resources allocated for the RDPQ pool are in
5003 * the same 4GB range.
5004 * 2) If #1 is true, continue with 64 bit DMA.
5005 * 3) If #1 is false, return -EAGAIN, which means: free all the
5006 * resources, set the DMA mask to 32 bits, and reallocate.
5008 if (!mpt3sas_check_same_4gb_region(
5009 (long)ioc->reply_post[i].reply_post_free, sz)) {
5011 ioc_err(ioc, "bad reply_post_free pool(0x%p), "
5012 "reply_post_free_dma = (0x%llx)\n",
5013 ioc->reply_post[i].reply_post_free,
5014 (unsigned long long)
5015 ioc->reply_post[i].reply_post_free_dma));
5021 ioc->reply_post[i].reply_post_free =
5022 (Mpi2ReplyDescriptorsUnion_t *)
5023 ((long)ioc->reply_post[i-1].reply_post_free
5024 + reply_post_free_sz);
5025 ioc->reply_post[i].reply_post_free_dma =
5027 (ioc->reply_post[i-1].reply_post_free_dma +
5028 reply_post_free_sz);
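/*
 * A sketch of the carving above, assuming RDPQ_MAX_INDEX_IN_ONE_CHUNK
 * is 16: 24 reply queues need DIV_ROUND_UP(24, 16) = 2 chunk
 * allocations; queues 1-15 and 17-23 are carved out of their chunk's
 * base allocation at reply_post_free_sz offsets.
 */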
5035 * _base_allocate_memory_pools - allocate start of day memory pools
5036 * @ioc: per adapter object
5038 * Return: 0 success, anything else error.
5041 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5043 struct mpt3sas_facts *facts;
5044 u16 max_sge_elements;
5045 u16 chains_needed_per_io;
5046 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
5049 u16 max_request_credit, nvme_blocks_needed;
5050 unsigned short sg_tablesize;
5054 struct chain_tracker *ct;
5056 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5060 facts = &ioc->facts;
5062 /* command line tunables for max sgl entries */
5063 if (max_sgl_entries != -1)
5064 sg_tablesize = max_sgl_entries;
5066 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
5067 sg_tablesize = MPT2SAS_SG_DEPTH;
5069 sg_tablesize = MPT3SAS_SG_DEPTH;
5072 /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
5074 sg_tablesize = min_t(unsigned short, sg_tablesize,
5075 MPT_KDUMP_MIN_PHYS_SEGMENTS);
5077 if (ioc->is_mcpu_endpoint)
5078 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5080 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
5081 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5082 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
5083 sg_tablesize = min_t(unsigned short, sg_tablesize,
5085 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
5086 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
5088 ioc->shost->sg_tablesize = sg_tablesize;
5091 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
5092 (facts->RequestCredit / 4));
5093 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
5094 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
5095 INTERNAL_SCSIIO_CMDS_COUNT)) {
5096 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
5097 facts->RequestCredit);
5100 ioc->internal_depth = 10;
5103 ioc->hi_priority_depth = ioc->internal_depth - (5);
5104 /* command line tunables for max controller queue depth */
5105 if (max_queue_depth != -1 && max_queue_depth != 0) {
5106 max_request_credit = min_t(u16, max_queue_depth +
5107 ioc->internal_depth, facts->RequestCredit);
5108 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
5109 max_request_credit = MAX_HBA_QUEUE_DEPTH;
5110 } else if (reset_devices)
5111 max_request_credit = min_t(u16, facts->RequestCredit,
5112 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
5114 max_request_credit = min_t(u16, facts->RequestCredit,
5115 MAX_HBA_QUEUE_DEPTH);
5117 /* Firmware maintains additional facts->HighPriorityCredit number of
5118 * credits for HiPriority Request messages, so hba queue depth will be
5119 * sum of max_request_credit and high priority queue depth.
5121 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
5123 /* request frame size */
5124 ioc->request_sz = facts->IOCRequestFrameSize * 4;
5126 /* reply frame size */
5127 ioc->reply_sz = facts->ReplyFrameSize * 4;
5129 /* chain segment size */
5130 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5131 if (facts->IOCMaxChainSegmentSize)
5132 ioc->chain_segment_sz =
5133 facts->IOCMaxChainSegmentSize *
5136 /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
5137 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
5140 ioc->chain_segment_sz = ioc->request_sz;
5142 /* calculate the max scatter element size */
5143 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
5147 /* calculate number of sg elements left over in the 1st frame */
5148 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
5149 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
5150 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
5152 /* now do the same for a chain buffer */
5153 max_sge_elements = ioc->chain_segment_sz - sge_size;
5154 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
5157 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
5159 chains_needed_per_io = ((ioc->shost->sg_tablesize -
5160 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
5162 if (chains_needed_per_io > facts->MaxChainDepth) {
5163 chains_needed_per_io = facts->MaxChainDepth;
5164 ioc->shost->sg_tablesize = min_t(u16,
5165 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
5166 * chains_needed_per_io), ioc->shost->sg_tablesize);
5168 ioc->chains_needed_per_io = chains_needed_per_io;
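/*
 * A worked example with assumed sizes: if the main message holds 8
 * SGEs, each chain holds 15, and sg_tablesize is 128, then
 * ((128 - 8) / 15) + 1 = 9 chain buffers are needed per I/O, subject
 * to the facts->MaxChainDepth cap above.
 */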
5170 /* reply free queue sizing - taking into account for 64 FW events */
5171 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5173 /* the mCPU endpoint uses a single counter for simplicity */
5174 if (ioc->is_mcpu_endpoint)
5175 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
5177 /* calculate reply descriptor post queue depth */
5178 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
5179 ioc->reply_free_queue_depth + 1;
5180 /* align the reply post queue on the next 16 count boundary */
5181 if (ioc->reply_post_queue_depth % 16)
5182 ioc->reply_post_queue_depth += 16 -
5183 (ioc->reply_post_queue_depth % 16);
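/*
 * For example, hba_queue_depth = 500 gives reply_free_queue_depth =
 * 564, a raw reply_post_queue_depth of 500 + 564 + 1 = 1065, and a
 * 16-aligned depth of 1072.
 */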
5186 if (ioc->reply_post_queue_depth >
5187 facts->MaxReplyDescriptorPostQueueDepth) {
5188 ioc->reply_post_queue_depth =
5189 facts->MaxReplyDescriptorPostQueueDepth -
5190 (facts->MaxReplyDescriptorPostQueueDepth % 16);
5191 ioc->hba_queue_depth =
5192 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
5193 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5197 "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
5198 "sge_per_io(%d), chains_per_io(%d)\n",
5199 ioc->max_sges_in_main_message,
5200 ioc->max_sges_in_chain_message,
5201 ioc->shost->sg_tablesize,
5202 ioc->chains_needed_per_io);
5204 /* reply post queue, 16 byte align */
5205 reply_post_free_sz = ioc->reply_post_queue_depth *
5206 sizeof(Mpi2DefaultReplyDescriptor_t);
5207 rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
5208 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
5209 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
5210 ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
5211 if (ret == -EAGAIN) {
5213 * Free allocated bad RDPQ memory pools.
5214 * Change dma coherent mask to 32 bit and reallocate RDPQ
5216 _base_release_memory_pools(ioc);
5217 ioc->use_32bit_dma = true;
5218 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
5220 "32 DMA mask failed %s\n", pci_name(ioc->pdev));
5223 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
5225 } else if (ret == -ENOMEM)
5227 total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
5228 DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
5229 ioc->scsiio_depth = ioc->hba_queue_depth -
5230 ioc->hi_priority_depth - ioc->internal_depth;
5232 /* set the scsi host can_queue depth, accounting
5233 * for internal commands that could be outstanding
5235 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
5237 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
5238 ioc->shost->can_queue));
5240 /* contiguous pool for request and chains, 16 byte align, one extra frame for smid=0 */
5243 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
5244 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
5246 /* hi-priority queue */
5247 sz += (ioc->hi_priority_depth * ioc->request_sz);
5249 /* internal queue */
5250 sz += (ioc->internal_depth * ioc->request_sz);
5252 ioc->request_dma_sz = sz;
5253 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
5254 &ioc->request_dma, GFP_KERNEL);
5255 if (!ioc->request) {
5256 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
5257 ioc->hba_queue_depth, ioc->chains_needed_per_io,
5258 ioc->request_sz, sz / 1024);
5259 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
5262 ioc->hba_queue_depth -= retry_sz;
5263 _base_release_memory_pools(ioc);
5264 goto retry_allocation;
5268 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
5269 ioc->hba_queue_depth, ioc->chains_needed_per_io,
5270 ioc->request_sz, sz / 1024);
5272 /* hi-priority queue */
5273 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
5275 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
5278 /* internal queue */
5279 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
5281 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
5285 "request pool(0x%p) - dma(0x%llx): "
5286 "depth(%d), frame_size(%d), pool_size(%d kB)\n",
5287 ioc->request, (unsigned long long) ioc->request_dma,
5288 ioc->hba_queue_depth, ioc->request_sz,
5289 (ioc->hba_queue_depth * ioc->request_sz) / 1024);
5294 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
5295 ioc->request, ioc->scsiio_depth));
5297 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
5298 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
5299 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
5300 if (!ioc->chain_lookup) {
5301 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5305 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
5306 for (i = 0; i < ioc->scsiio_depth; i++) {
5307 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
5308 if (!ioc->chain_lookup[i].chains_per_smid) {
5309 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5314 /* initialize hi-priority queue smid's */
5315 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
5316 sizeof(struct request_tracker), GFP_KERNEL);
5317 if (!ioc->hpr_lookup) {
5318 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
5321 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
5323 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
5325 ioc->hi_priority_depth, ioc->hi_priority_smid));
5327 /* initialize internal queue smid's */
5328 ioc->internal_lookup = kcalloc(ioc->internal_depth,
5329 sizeof(struct request_tracker), GFP_KERNEL);
5330 if (!ioc->internal_lookup) {
5331 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
5334 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
5336 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
5338 ioc->internal_depth, ioc->internal_smid));
5340 * The number of NVMe page sized blocks needed is:
5341 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
5342 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
5343 * that is placed in the main message frame. 8 is the size of each PRP
5344 * entry or PRP list pointer entry. 8 is subtracted from page_size
5345 * because of the PRP list pointer entry at the end of a page, so this
5346 * is not counted as a PRP entry. The 1 added page is a round up.
5348 * To avoid allocation failures due to the amount of memory that could
5349 * be required for NVMe PRP's, only each set of NVMe blocks will be
5350 * contiguous, so a new set is allocated for each possible I/O.
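 *
 * For example, with sg_tablesize = 128 and a 4K page:
 * ((128 * 8) - 1) / (4096 - 8) + 1 = 1, so one PRP page per I/O is
 * enough; a sg_tablesize of 1024 would need 8191 / 4088 + 1 = 3 pages.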
5352 ioc->chains_per_prp_buffer = 0;
5353 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
5354 nvme_blocks_needed =
5355 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
5356 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
5357 nvme_blocks_needed++;
5359 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
5360 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
5361 if (!ioc->pcie_sg_lookup) {
5362 ioc_err(ioc, "PCIe SGL lookup: kzalloc failed\n");
5365 sz = nvme_blocks_needed * ioc->page_size;
5366 ioc->pcie_sgl_dma_pool =
5367 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
5368 if (!ioc->pcie_sgl_dma_pool) {
5369 ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5373 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5374 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
5375 ioc->chains_needed_per_io);
5377 for (i = 0; i < ioc->scsiio_depth; i++) {
5378 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
5379 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5380 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5381 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5382 ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5385 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5386 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5388 ioc->pcie_sg_lookup[i].pcie_sgl +
5389 (j * ioc->chain_segment_sz);
5390 ct->chain_buffer_dma =
5391 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5392 (j * ioc->chain_segment_sz);
5397 ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5398 ioc->scsiio_depth, sz,
5399 (sz * ioc->scsiio_depth) / 1024));
5401 ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
5402 ioc->chains_per_prp_buffer));
5403 total_sz += sz * ioc->scsiio_depth;
5406 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5407 ioc->chain_segment_sz, 16, 0);
5408 if (!ioc->chain_dma_pool) {
5409 ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
5412 for (i = 0; i < ioc->scsiio_depth; i++) {
5413 for (j = ioc->chains_per_prp_buffer;
5414 j < ioc->chains_needed_per_io; j++) {
5415 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5416 ct->chain_buffer = dma_pool_alloc(
5417 ioc->chain_dma_pool, GFP_KERNEL,
5418 &ct->chain_buffer_dma);
5419 if (!ct->chain_buffer) {
5420 ioc_err(ioc, "chain_lookup: dma_pool_alloc failed\n");
5424 total_sz += ioc->chain_segment_sz;
5428 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
5429 ioc->chain_depth, ioc->chain_segment_sz,
5430 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
5432 /* sense buffers, 4 byte align */
5433 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
5434 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5436 if (!ioc->sense_dma_pool) {
5437 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5440 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5443 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5446 /* The sense buffers are required to be in the same 4GB region.
5447 * The function below checks for that.
5448 * On failure, a new pci pool is created with updated alignment;
5449 * the older allocation and pool are destroyed.
5450 * The alignment is chosen in such a way that the next allocation,
5451 * if successful, will always meet the same 4GB region requirement.
5452 * The actual requirement is not alignment per se, but that the start
5453 * and end of the DMA address have the same upper 32 bits.
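 *
 * For example, with scsiio_depth = 400 and SCSI_SENSE_BUFFERSIZE = 96,
 * the pool size is 400 * 96 = 38400 bytes; on the retry path below the
 * pool is aligned to roundup_pow_of_two(38400) = 65536, so an
 * allocation can never straddle a 4GB boundary.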
5455 if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
5456 /* release sense pool and reallocate */
5457 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5458 dma_pool_destroy(ioc->sense_dma_pool);
5461 ioc->sense_dma_pool =
5462 dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5463 roundup_pow_of_two(sz), 0);
5464 if (!ioc->sense_dma_pool) {
5465 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5468 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5471 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5476 "sense pool(0x%p)- dma(0x%llx): depth(%d),"
5477 "element_size(%d), pool_size(%d kB)\n",
5478 ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
5479 SCSI_SENSE_BUFFERSIZE, sz / 1024);
5483 /* reply pool, 4 byte align */
5484 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
5485 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
5487 if (!ioc->reply_dma_pool) {
5488 ioc_err(ioc, "reply pool: dma_pool_create failed\n");
5491 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5494 ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
5497 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5498 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5500 ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5501 ioc->reply, ioc->reply_free_queue_depth,
5502 ioc->reply_sz, sz / 1024));
5504 ioc_info(ioc, "reply_dma(0x%llx)\n",
5505 (unsigned long long)ioc->reply_dma));
5508 /* reply free queue, 16 byte align */
5509 sz = ioc->reply_free_queue_depth * 4;
5510 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
5511 &ioc->pdev->dev, sz, 16, 0);
5512 if (!ioc->reply_free_dma_pool) {
5513 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
5516 ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
5517 &ioc->reply_free_dma);
5518 if (!ioc->reply_free) {
5519 ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
5523 ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5524 ioc->reply_free, ioc->reply_free_queue_depth,
5527 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
5528 (unsigned long long)ioc->reply_free_dma));
5531 if (ioc->rdpq_array_enable) {
5532 reply_post_free_array_sz = ioc->reply_queue_count *
5533 sizeof(Mpi2IOCInitRDPQArrayEntry);
5534 ioc->reply_post_free_array_dma_pool =
5535 dma_pool_create("reply_post_free_array pool",
5536 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5537 if (!ioc->reply_post_free_array_dma_pool) {
5539 ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
5542 ioc->reply_post_free_array =
5543 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5544 GFP_KERNEL, &ioc->reply_post_free_array_dma);
5545 if (!ioc->reply_post_free_array) {
5547 ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
5551 ioc->config_page_sz = 512;
5552 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
5553 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
5554 if (!ioc->config_page) {
5555 ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
5559 ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
5560 ioc->config_page, (unsigned long long)ioc->config_page_dma,
5561 ioc->config_page_sz);
5562 total_sz += ioc->config_page_sz;
5564 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
5566 ioc_info(ioc, "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
5567 ioc->shost->can_queue, facts->RequestCredit);
5568 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
5569 ioc->shost->sg_tablesize);
5577 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
5578 * @ioc: Pointer to MPT_ADAPTER structure
5579 * @cooked: Request raw or cooked IOC state
5581 * Return: all IOC Doorbell register bits if cooked==0, else just the
5582 * Doorbell bits in MPI2_IOC_STATE_MASK.
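 *
 * For example, assuming the MPI2 header values (MPI2_IOC_STATE_MASK ==
 * 0xF0000000, MPI2_IOC_STATE_OPERATIONAL == 0x20000000), a raw Doorbell
 * value of 0x24000200 reads back as 0x20000000 when cooked is nonzero.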
5585 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
5589 s = ioc->base_readl(&ioc->chip->Doorbell);
5590 sc = s & MPI2_IOC_STATE_MASK;
5591 return cooked ? sc : s;
5595 * _base_wait_on_iocstate - waiting on a particular ioc state
5597 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
5598 * @timeout: timeout in seconds
5600 * Return: 0 for success, non-zero for failure.
5603 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5609 cntdn = 1000 * timeout;
5611 current_state = mpt3sas_base_get_iocstate(ioc, 1);
5612 if (current_state == ioc_state)
5614 if (count && current_state == MPI2_IOC_STATE_FAULT)
5616 if (count && current_state == MPI2_IOC_STATE_COREDUMP)
5619 usleep_range(1000, 1500);
5623 return current_state;
5627 * _base_dump_reg_set - print a hexdump of the register set.
5628 * @ioc: per adapter object
5633 _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
5635 unsigned int i, sz = 256;
5636 u32 __iomem *reg = (u32 __iomem *)ioc->chip;
5638 ioc_info(ioc, "System Register set:\n");
5639 for (i = 0; i < (sz / sizeof(u32)); i++)
5640 pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
5644 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
5645 * a write to the doorbell)
5646 * @ioc: per adapter object
5647 * @timeout: timeout in seconds
5649 * Return: 0 for success, non-zero for failure.
5651 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5655 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5661 cntdn = 1000 * timeout;
5663 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5664 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5666 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5667 __func__, count, timeout));
5671 usleep_range(1000, 1500);
5675 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5676 __func__, count, int_status);
5681 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5687 cntdn = 2000 * timeout;
5689 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5690 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5692 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5693 __func__, count, timeout));
5701 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5702 __func__, count, int_status);
5708 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
5709 * @ioc: per adapter object
5710 * @timeout: timeout in seconds
5712 * Return: 0 for success, non-zero for failure.
5714 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
5718 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
5725 cntdn = 1000 * timeout;
5727 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5728 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5730 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5731 __func__, count, timeout));
5733 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5734 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
5735 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5736 MPI2_IOC_STATE_FAULT) {
5737 mpt3sas_print_fault_code(ioc, doorbell);
5740 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5741 MPI2_IOC_STATE_COREDUMP) {
5742 mpt3sas_print_coredump_info(ioc, doorbell);
5745 } else if (int_status == 0xFFFFFFFF)
5748 usleep_range(1000, 1500);
5753 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5754 __func__, count, int_status);
5759 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5760 * @ioc: per adapter object
5761 * @timeout: timeout in seconds
5763 * Return: 0 for success, non-zero for failure.
5766 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5772 cntdn = 1000 * timeout;
5774 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5775 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5777 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5778 __func__, count, timeout));
5782 usleep_range(1000, 1500);
5786 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5787 __func__, count, doorbell_reg);
5792 * _base_send_ioc_reset - send doorbell reset
5793 * @ioc: per adapter object
5794 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
5795 * @timeout: timeout in seconds
5797 * Return: 0 for success, non-zero for failure.
5800 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5804 unsigned long flags;
5806 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5807 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
5811 if (!(ioc->facts.IOCCapabilities &
5812 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5815 ioc_info(ioc, "sending message unit reset !!\n");
5817 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5818 &ioc->chip->Doorbell);
5819 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5824 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5826 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5827 __func__, ioc_state);
5833 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5834 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5836 * Wait for IOC state CoreDump to clear only during
5837 * HBA initialization & release time.
5839 if ((ioc_state & MPI2_IOC_STATE_MASK) ==
5840 MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
5841 ioc->fault_reset_work_q == NULL)) {
5842 spin_unlock_irqrestore(
5843 &ioc->ioc_reset_in_progress_lock, flags);
5844 mpt3sas_print_coredump_info(ioc, ioc_state);
5845 mpt3sas_base_wait_for_coredump_completion(ioc,
5848 &ioc->ioc_reset_in_progress_lock, flags);
5850 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5852 ioc_info(ioc, "message unit reset: %s\n",
5853 r == 0 ? "SUCCESS" : "FAILED");
5858 * mpt3sas_wait_for_ioc - wait for the IOC to become operational.
5859 * @ioc: per adapter object
5860 * @timeout: timeout in seconds
5862 * Waits up to timeout seconds for the IOC to become operational.
5863 *
5864 * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
5868 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5870 int wait_state_count = 0;
5874 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5875 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5878 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5879 __func__, ++wait_state_count);
5880 } while (--timeout);
5882 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5885 if (wait_state_count)
5886 ioc_info(ioc, "ioc is operational\n");
5891 * _base_handshake_req_reply_wait - send request thru doorbell interface
5892 * @ioc: per adapter object
5893 * @request_bytes: request length
5894 * @request: pointer having request payload
5895 * @reply_bytes: reply length
5896 * @reply: pointer to reply payload
5897 * @timeout: timeout in seconds
5899 * Return: 0 for success, non-zero for failure.
5902 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5903 u32 *request, int reply_bytes, u16 *reply, int timeout)
5905 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5910 /* make sure doorbell is not in use */
5911 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5912 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5916 /* clear pending doorbell interrupts from previous state changes */
5917 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5918 MPI2_HIS_IOC2SYS_DB_STATUS)
5919 writel(0, &ioc->chip->HostInterruptStatus);
5921 /* send message to ioc */
5922 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5923 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5924 &ioc->chip->Doorbell);
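/*
 * A sketch of the encoding above, assuming the usual MPI2 values
 * (MPI2_FUNCTION_HANDSHAKE == 0x42 at shift 24, dword count at shift
 * 16): a 28-byte (7-dword) request posts
 * (0x42 << 24) | (7 << 16) = 0x42070000 to the doorbell.
 */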
5926 if ((_base_spin_on_doorbell_int(ioc, 5))) {
5927 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5931 writel(0, &ioc->chip->HostInterruptStatus);
5933 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5934 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5939 /* send message 32-bits at a time */
5940 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5941 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
5942 if ((_base_wait_for_doorbell_ack(ioc, 5)))
5947 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5952 /* now wait for the reply */
5953 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5954 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5959 /* read the first two 16-bits, it gives the total length of the reply */
5960 reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5961 & MPI2_DOORBELL_DATA_MASK);
5962 writel(0, &ioc->chip->HostInterruptStatus);
5963 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5964 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5968 reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5969 & MPI2_DOORBELL_DATA_MASK);
5970 writel(0, &ioc->chip->HostInterruptStatus);
5972 for (i = 2; i < default_reply->MsgLength * 2; i++) {
5973 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5974 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5978 if (i >= reply_bytes/2) /* overflow case */
5979 ioc->base_readl(&ioc->chip->Doorbell);
5981 reply[i] = le16_to_cpu(
5982 ioc->base_readl(&ioc->chip->Doorbell)
5983 & MPI2_DOORBELL_DATA_MASK);
5984 writel(0, &ioc->chip->HostInterruptStatus);
5987 _base_wait_for_doorbell_int(ioc, 5);
5988 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5990 ioc_info(ioc, "doorbell is in use (line=%d)\n",
5993 writel(0, &ioc->chip->HostInterruptStatus);
5995 if (ioc->logging_level & MPT_DEBUG_INIT) {
5996 mfp = (__le32 *)reply;
5997 pr_info("\toffset:data\n");
5998 for (i = 0; i < reply_bytes/4; i++)
5999 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6000 le32_to_cpu(mfp[i]));
6006 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
6007 * @ioc: per adapter object
6008 * @mpi_reply: the reply payload from FW
6009 * @mpi_request: the request payload sent to FW
6011 * The SAS IO Unit Control Request message allows the host to perform
6012 * low-level operations, such as resets on the PHYs of the IO Unit. It also
6013 * allows the host to obtain the IOC-assigned device handle for a device,
6014 * if it has other identifying information about the device, and to
6015 * remove IOC resources associated with the device.
6017 * Return: 0 for success, non-zero for failure.
6020 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
6021 Mpi2SasIoUnitControlReply_t *mpi_reply,
6022 Mpi2SasIoUnitControlRequest_t *mpi_request)
6029 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6031 mutex_lock(&ioc->base_cmds.mutex);
6033 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6034 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6039 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6043 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6045 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6051 ioc->base_cmds.status = MPT3_CMD_PENDING;
6052 request = mpt3sas_base_get_msg_frame(ioc, smid);
6053 ioc->base_cmds.smid = smid;
6054 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
6055 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6056 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
6057 ioc->ioc_link_reset_in_progress = 1;
6058 init_completion(&ioc->base_cmds.done);
6059 ioc->put_smid_default(ioc, smid);
6060 wait_for_completion_timeout(&ioc->base_cmds.done,
6061 msecs_to_jiffies(10000));
6062 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6063 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
6064 ioc->ioc_link_reset_in_progress)
6065 ioc->ioc_link_reset_in_progress = 0;
6066 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6067 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
6068 mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
6070 goto issue_host_reset;
6072 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6073 memcpy(mpi_reply, ioc->base_cmds.reply,
6074 sizeof(Mpi2SasIoUnitControlReply_t));
6076 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
6077 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6082 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6083 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6086 mutex_unlock(&ioc->base_cmds.mutex);
6091 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
6092 * @ioc: per adapter object
6093 * @mpi_reply: the reply payload from FW
6094 * @mpi_request: the request payload sent to FW
6096 * The SCSI Enclosure Processor request message causes the IOC to
6097 * communicate with SES devices to control LED status signals.
6099 * Return: 0 for success, non-zero for failure.
6102 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
6103 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
6110 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6112 mutex_lock(&ioc->base_cmds.mutex);
6114 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6115 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6120 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6124 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6126 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6132 ioc->base_cmds.status = MPT3_CMD_PENDING;
6133 request = mpt3sas_base_get_msg_frame(ioc, smid);
6134 ioc->base_cmds.smid = smid;
6135 memset(request, 0, ioc->request_sz);
6136 memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
6137 init_completion(&ioc->base_cmds.done);
6138 ioc->put_smid_default(ioc, smid);
6139 wait_for_completion_timeout(&ioc->base_cmds.done,
6140 msecs_to_jiffies(10000));
6141 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6142 mpt3sas_check_cmd_timeout(ioc,
6143 ioc->base_cmds.status, mpi_request,
6144 sizeof(Mpi2SepRequest_t)/4, issue_reset);
6145 goto issue_host_reset;
6147 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6148 memcpy(mpi_reply, ioc->base_cmds.reply,
6149 sizeof(Mpi2SepReply_t));
6151 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
6152 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6157 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6158 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6161 mutex_unlock(&ioc->base_cmds.mutex);
6166 * _base_get_port_facts - obtain port facts reply and save in ioc
6167 * @ioc: per adapter object
6170 * Return: 0 for success, non-zero for failure.
6173 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
6175 Mpi2PortFactsRequest_t mpi_request;
6176 Mpi2PortFactsReply_t mpi_reply;
6177 struct mpt3sas_port_facts *pfacts;
6178 int mpi_reply_sz, mpi_request_sz, r;
6180 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6182 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
6183 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
6184 memset(&mpi_request, 0, mpi_request_sz);
6185 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
6186 mpi_request.PortNumber = port;
6187 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6188 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6191 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6195 pfacts = &ioc->pfacts[port];
6196 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
6197 pfacts->PortNumber = mpi_reply.PortNumber;
6198 pfacts->VP_ID = mpi_reply.VP_ID;
6199 pfacts->VF_ID = mpi_reply.VF_ID;
6200 pfacts->MaxPostedCmdBuffers =
6201 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
6207 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
6208 * @ioc: per adapter object
6211 * Return: 0 for success, non-zero for failure.
6214 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
6219 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6221 if (ioc->pci_error_recovery) {
6223 ioc_info(ioc, "%s: host in pci error recovery\n",
6228 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6230 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6231 __func__, ioc_state));
6233 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
6234 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6237 if (ioc_state & MPI2_DOORBELL_USED) {
6238 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6239 goto issue_diag_reset;
6242 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6243 mpt3sas_print_fault_code(ioc, ioc_state &
6244 MPI2_DOORBELL_DATA_MASK);
6245 goto issue_diag_reset;
6246 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6247 MPI2_IOC_STATE_COREDUMP) {
6249 "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
6250 __func__, ioc_state);
6254 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6257 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6258 __func__, ioc_state));
6263 rc = _base_diag_reset(ioc);
/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCFactsRequest_t mpi_request;
	Mpi2IOCFactsReply_t mpi_reply;
	struct mpt3sas_facts *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	r = _base_wait_for_iocstate(ioc, 10);
	if (r) {
		dfailprintk(ioc,
		    ioc_info(ioc, "%s: failed getting to correct state\n",
			__func__));
		return r;
	}
	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

	if (r != 0) {
		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
		return r;
	}

	facts = &ioc->facts;
	memset(facts, 0, sizeof(struct mpt3sas_facts));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
	if (ioc->msix_enable && (facts->MaxMSIxVectors <=
	    MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
		ioc->combined_reply_queue = 0;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	if ((facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
		ioc->rdpq_array_capable = 1;
	if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
	    && ioc->is_aero_ioc)
		ioc->atomic_desc_capable = 1;
	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize =
	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		facts->IOCMaxChainSegmentSize =
			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
	}
	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit =
	    le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;

	/*
	 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
	 */
	ioc->page_size = 1 << facts->CurrentHostPageSize;
	if (ioc->page_size == 1) {
		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
	}
	dinitprintk(ioc,
	    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
		facts->CurrentHostPageSize));

	dinitprintk(ioc,
	    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
		facts->RequestCredit, facts->MaxChainDepth));
	dinitprintk(ioc,
	    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
		facts->IOCRequestFrameSize * 4,
		facts->ReplyFrameSize * 4));
	return 0;
}
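/*
 * Worked example for the page-size math above: CurrentHostPageSize is a
 * power-of-two exponent, so a reported value of 12 yields
 * 1 << 12 = 4096 bytes. A value of 0 yields 1, which the code treats as
 * "not reported" and replaces with the 4k default.
 */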
/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCInitRequest_t mpi_request;
	Mpi2IOCInitReply_t mpi_reply;
	int i, r = 0;
	ktime_t current_time;
	u16 ioc_status;
	u32 reply_post_free_array_sz = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	mpi_request.VF_ID = 0; /* TODO */
	mpi_request.VP_ID = 0;
	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;

	if (_base_is_controller_msix_enabled(ioc))
		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
	mpi_request.ReplyDescriptorPostQueueDepth =
	    cpu_to_le16(ioc->reply_post_queue_depth);
	mpi_request.ReplyFreeQueueDepth =
	    cpu_to_le16(ioc->reply_free_queue_depth);

	mpi_request.SenseBufferAddressHigh =
	    cpu_to_le32((u64)ioc->sense_dma >> 32);
	mpi_request.SystemReplyAddressHigh =
	    cpu_to_le32((u64)ioc->reply_dma >> 32);
	mpi_request.SystemRequestFrameBaseAddress =
	    cpu_to_le64((u64)ioc->request_dma);
	mpi_request.ReplyFreeQueueAddress =
	    cpu_to_le64((u64)ioc->reply_free_dma);

	if (ioc->rdpq_array_enable) {
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
		for (i = 0; i < ioc->reply_queue_count; i++)
			ioc->reply_post_free_array[i].RDPQBaseAddress =
			    cpu_to_le64(
				(u64)ioc->reply_post[i].reply_post_free_dma);
		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
	} else {
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
	}

	/*
	 * Set the flag to enable CoreDump state feature in IOC firmware.
	 */
	mpi_request.ConfigurationFlags |=
	    cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);

	/* This time stamp specifies number of milliseconds
	 * since epoch ~ midnight January 1, 1970.
	 */
	current_time = ktime_get_real();
	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		__le32 *mfp;
		int i;

		mfp = (__le32 *)&mpi_request;
		ioc_info(ioc, "\toffset:data\n");
		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}

	r = _base_handshake_req_reply_wait(ioc,
	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);

	if (r != 0) {
		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
		return r;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
	    mpi_reply.IOCLogInfo) {
		ioc_err(ioc, "%s: failed\n", __func__);
		r = -EIO;
	}

	return r;
}
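/*
 * Design note: in RDPQ array mode the IOC_INIT request hands firmware
 * the DMA address of reply_post_free_array, an array with one
 * Mpi2IOCInitRDPQArrayEntry per reply queue, instead of the address of
 * a single contiguous reply descriptor post queue. This lets each
 * MSI-X reply queue own an independently allocated descriptor ring.
 */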
/**
 * mpt3sas_port_enable_done - command completion routine for port enable
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;

	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;

	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
		return 1;

	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		ioc->port_enable_failed = 1;

	if (ioc->is_driver_loading) {
		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
			mpt3sas_port_enable_complete(ioc);
			return 1;
		} else {
			ioc->start_scan_failed = ioc_status;
			ioc->start_scan = 0;
			return 1;
		}
	}
	complete(&ioc->port_enable_cmds.done);
	return 1;
}
/**
 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	Mpi2PortEnableReply_t *mpi_reply;
	int r = 0;
	u16 smid;
	u16 ioc_status;

	ioc_info(ioc, "sending port enable !!\n");

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	init_completion(&ioc->port_enable_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2PortEnableRequest_t)/4);
		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
		goto out;
	}

	mpi_reply = ioc->port_enable_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
			__func__, ioc_status);
		r = -EFAULT;
		goto out;
	}

 out:
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
	return r;
}
/**
 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	u16 smid;

	ioc_info(ioc, "sending port enable !!\n");

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	ioc->put_smid_default(ioc, smid);
	return 0;
}
/**
 * _base_determine_wait_on_discovery - disposition
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Return: 1 for wait, 0 for don't wait.
 */
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
{
	/* We wait for discovery to complete if IR firmware is loaded.
	 * The sas topology events arrive before PD events, so we need time to
	 * turn on the bit in ioc->pd_handles to indicate PD.
	 * Also, it may be required to report Volumes ahead of physical
	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
	 */
	if (ioc->ir_firmware)
		return 1;

	/* if no Bios, then we don't need to wait */
	if (!ioc->bios_pg3.BiosVersion)
		return 0;

	/* Bios is present, then we drop down here.
	 *
	 * If there are any entries in the Bios Page 2, then we wait
	 * for discovery to complete.
	 */

	/* Current Boot Device */
	if ((ioc->bios_pg2.CurrentBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Request Boot Device */
	   (ioc->bios_pg2.ReqBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Alternate Request Boot Device */
	   (ioc->bios_pg2.ReqAltBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
		return 0;

	return 1;
}
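/*
 * In other words: with no IR firmware, no BIOS, or a BIOS whose Page 2
 * specifies no boot devices, discovery can proceed in the background
 * and the scan thread need not be held.
 */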
/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */
static void
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u32 desired_event;

	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));

	if (event < 32)
		ioc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		ioc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		ioc->event_masks[2] &= ~desired_event;
	else if (event < 128)
		ioc->event_masks[3] &= ~desired_event;
}
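/*
 * Worked example: event_masks[] is four 32-bit words covering event
 * codes 0-127, where a set bit means "masked". Unmasking
 * MPI2_EVENT_SAS_DISCOVERY (0x0016 = 22) clears bit 22 of
 * event_masks[0]; an event code of 0x25 (37) would clear bit 5 of
 * event_masks[1].
 */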
/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
	} else
		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	return r;
}
/**
 * mpt3sas_base_validate_event_type - validating event types
 * @ioc: per adapter object
 * @event_type: firmware event
 *
 * This will turn on firmware event notification when an application
 * asks for that event. We don't mask events that are already enabled.
 */
void
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
{
	int i, j;
	u32 event_mask, desired_event;
	u8 send_update_to_fw;

	for (i = 0, send_update_to_fw = 0; i <
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		event_mask = ~event_type[i];
		desired_event = 1;
		for (j = 0; j < 32; j++) {
			if (!(event_mask & desired_event) &&
			    (ioc->event_masks[i] & desired_event)) {
				ioc->event_masks[i] &= ~desired_event;
				send_update_to_fw = 1;
			}
			desired_event = (desired_event << 1);
		}
	}

	if (!send_update_to_fw)
		return;

	mutex_lock(&ioc->base_cmds.mutex);
	_base_event_notification(ioc);
	mutex_unlock(&ioc->base_cmds.mutex);
}
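/*
 * Example: if an application asks for event_type[0] with bit 22 set
 * (MPI2_EVENT_SAS_DISCOVERY) while that bit is still masked in
 * ioc->event_masks[0], the loop above clears the mask bit and re-sends
 * the event notification request so firmware starts delivering it.
 */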
/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	ioc_info(ioc, "sending diag reset !!\n");

	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		if (count++ > 20) {
			ioc_info(ioc,
			    "Stop writing magic sequence after 20 retries\n");
			_base_dump_reg_set(ioc);
			goto out;
		}

		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc,
		    ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
			count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);

	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	    &ioc->chip->HostDiagnostic);

	/* This delay allows the chip PCIe hardware time to finish reset tasks */
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);

		if (host_diagnostic == 0xFFFFFFFF) {
			ioc_info(ioc,
			    "Invalid host diagnostic register value\n");
			_base_dump_reg_set(ioc);
			goto out;
		}
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc,
		    ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	drsprintk(ioc,
	    ioc_info(ioc, "disable writes to the diagnostic register\n"));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
			__func__, ioc_state);
		_base_dump_reg_set(ioc);
		goto out;
	}

	ioc_info(ioc, "diag reset: SUCCESS\n");
	return 0;

 out:
	ioc_err(ioc, "diag reset: FAILED\n");
	return -EFAULT;
}
/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc,
	    ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
		__func__, ioc_state));

	/* if in RESET state, it should move to READY state shortly */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			if (count++ == 10) {
				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
					__func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		ioc_info(ioc, "unexpected doorbell active!\n");
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
		/*
		 * if host reset is invoked while watch dog thread is waiting
		 * for IOC state to be changed to Fault state then driver has
		 * to wait here for CoreDump state to clear otherwise reset
		 * will be issued to the FW and FW move the IOC state to
		 * reset state without copying the FW logs to coredump region.
		 */
		if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
			mpt3sas_print_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc,
			    __func__);
		}
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15)))
			return 0;

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index, rc;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
					reply_address, i);
	}

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r) {
		/*
		 * No need to check IOC state for fault state & issue
		 * diag reset during host reset. This check is needed
		 * only during driver load time.
		 */
		if (!ioc->is_driver_loading)
			return r;

		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_send_ioc_init(ioc)))
			return r;
	}

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7) <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	mpt3sas_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	_base_static_config_pages(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	if (ioc->is_driver_loading) {

		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
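/*
 * Bring-up order matters in the function above: the reply free/post
 * queues are seeded first, IOC_INIT then hands their addresses to
 * firmware, the host index registers are primed, interrupts are
 * unmasked, event notification is enabled, and only then is port
 * enable (discovery) started.
 */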
/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* synchronizing freeing resource with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		mpt3sas_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
}
/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, rc;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->smp_affinity_enable = smp_affinity_enable;

	ioc->rdpq_array_enable_assigned = 0;
	ioc->use_32bit_dma = false;
	if (ioc->is_aero_ioc)
		ioc->base_readl = &_base_readl_aero;
	else
		ioc->base_readl = &_base_readl;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_free_resources;
	}

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter gather
		 * elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		if (ioc->high_iops_queues)
			ioc->get_msix_index_for_smlio =
					&_base_get_high_iops_msix_index;
		else
			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	}
	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path =
				&_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority =
				&_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		if (ioc->is_mcpu_endpoint)
			ioc->put_smid_scsi_io =
				&_base_put_smid_mpi_ep_scsi_io;
		else
			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
	}
	/*
	 * These function pointers are for the other requests that don't
	 * require the IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r) {
			rc = _base_check_for_fault_and_issue_reset(ioc);
			if (rc || (_base_get_port_facts(ioc, i)))
				goto out_free_resources;
		}
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	if (irqpoll_weight > 0)
		ioc->thresh_hold = irqpoll_weight;
	else
		ioc->thresh_hold = ioc->hba_queue_depth/4;

	_base_init_irqpolls(ioc);
	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}
	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	/*
	 * Copy current copy of IOCFacts in prev_fw_facts
	 * and it will be used during online firmware upgrade.
	 */
	memcpy(&ioc->prev_fw_facts, &ioc->facts,
	    sizeof(struct mpt3sas_facts));

	ioc->non_operational_loop = 0;
	ioc->ioc_coredump_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}
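/*
 * Note that the out_free_resources unwind above frees everything
 * allocated by this function and NULLs the internal command reply
 * buffers, so a later detach cannot double-free them (kfree(NULL) is
 * a no-op).
 */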
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
/**
 * _base_pre_reset_handler - pre reset handler
 * @ioc: per adapter object
 */
static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_pre_reset_handler(ioc);
	mpt3sas_ctl_pre_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
 * @ioc: per adapter object
 */
static void
_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			ioc->start_scan_failed =
				MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
			ioc->port_enable_cmds.status =
				MPT3_CMD_NOT_USED;
		} else {
			complete(&ioc->port_enable_cmds.done);
		}
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}
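/*
 * Each pending internal command above is flagged with MPT3_CMD_RESET
 * before its completion is signalled, so the waiter (e.g.
 * _base_event_notification()) can return -EFAULT instead of reporting
 * a spurious timeout.
 */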
/**
 * _base_clear_outstanding_commands - clear all outstanding commands
 * @ioc: per adapter object
 */
static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
	_base_clear_outstanding_mpt_commands(ioc);
}
/**
 * _base_reset_done_handler - reset done handler
 * @ioc: per adapter object
 */
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands to drain
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * Waits up to 10s for all pending commands to complete prior to putting
 * the controller in reset.
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
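/*
 * The wait above relies on the I/O completion path decrementing
 * pending_io_count and waking ioc->reset_wq while shost_recovery is
 * set; if the count never drains, the 10 second timeout bounds the
 * wait.
 */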
/**
 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
 *     attributes during online firmware upgrade and update the corresponding
 *     IOC variables accordingly.
 *
 * @ioc: Pointer to MPT_ADAPTER structure
 */
static int
_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
{
	u16 pd_handles_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;

	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;

		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
		    GFP_KERNEL);
		if (!pd_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pd_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pd_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;

		blocking_handles = krealloc(ioc->blocking_handles,
		    pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for blocking_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		ioc->pd_handles_sz = pd_handles_sz;

		pend_os_device_add = krealloc(ioc->pend_os_device_add,
		    pd_handles_sz, GFP_KERNEL);
		if (!pend_os_device_add) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		    (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;

		device_remove_in_progress = krealloc(
		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
		if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		    ioc->device_remove_in_progress_sz, 0,
		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
	}

	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
	return 0;
}
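/*
 * Note on the krealloc pattern above: krealloc() preserves existing
 * contents, so only the newly grown tail of each bitmap is zeroed,
 * using the old size as the starting offset.
 */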
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

	if (ioc->pci_error_recovery) {
		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	mutex_lock(&ioc->reset_in_progress_mutex);

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
		    (ioc_state & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP)
			is_fault = 1;
	}
	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_clear_outstanding_commands(ioc);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	r = _base_check_ioc_facts_changes(ioc);
	if (r) {
		ioc_info(ioc,
		    "Some of the parameters got changed in this new firmware image and it requires system reboot\n");
		goto out;
	}
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. Please reboot the system and ensure that the correct firmware version is running\n",
		      ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
	return r;
}