/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>        /* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"

static MPT_CALLBACK     mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH     30000
#define MAX_CHAIN_DEPTH         100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
        " max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
        "irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
        " enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
        "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
        "0 - balanced: high iops mode is enabled &\n\t\t"
        "interrupt coalescing is enabled only on high iops queues,\n\t\t"
        "1 - iops: high iops mode is disabled &\n\t\t"
        "interrupt coalescing is enabled on all queues,\n\t\t"
        "2 - latency: high iops mode is disabled &\n\t\t"
        "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
        "\t\tdefault - default perf_mode is 'balanced'"
        );

enum mpt3sas_perf_mode {
        MPT_PERF_MODE_DEFAULT   = -1,
        MPT_PERF_MODE_BALANCED  = 0,
        MPT_PERF_MODE_IOPS      = 1,
        MPT_PERF_MODE_LATENCY   = 2,
};
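
/*
 * Usage note (values below are only examples): the parameters declared
 * above with module_param() use permission 0444, so they can only be
 * supplied at load time, e.g.:
 *
 *     modprobe mpt3sas max_queue_depth=1024 max_msix_vectors=8 perf_mode=2
 *
 * where perf_mode=2 selects MPT_PERF_MODE_LATENCY on Aero/Sea
 * controllers and -1 (the default) leaves it at MPT_PERF_MODE_DEFAULT.
 */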

static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
                u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);

/**
 * mpt3sas_base_check_cmd_timeout - check whether a command timed out or
 *              was terminated due to Host reset.
 *
 * @ioc:        per adapter object.
 * @status:     Status of issued command.
 * @mpi_request: mf request pointer.
 * @sz:         size of buffer.
 *
 * Return: 1 if a reset should be issued, 0 otherwise.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
                u8 status, void *mpi_request, int sz)
{
        u8 issue_reset = 0;

        if (!(status & MPT3_CMD_RESET))
                issue_reset = 1;

        ioc_err(ioc, "Command %s\n",
                issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
        _debug_dump_mf(mpi_request, sz);

        return issue_reset;
}

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: parameter value string
 * @kp: kernel parameter descriptor
 *
 * Return: 0 on success, otherwise the error from param_set_int().
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;

        if (ret)
                return ret;

        /* global ioc spinlock to protect controller list on list operations */
        pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
        spin_lock(&gioc_lock);
        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
                ioc->fwfault_debug = mpt3sas_fwfault_debug;
        spin_unlock(&gioc_lock);
        return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
        param_get_int, &mpt3sas_fwfault_debug, 0644);
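
/*
 * Usage note: unlike the read-only parameters above, this parameter is
 * registered via module_param_call() with permission 0644, so it can
 * also be flipped at runtime, e.g.:
 *
 *     echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * which invokes _scsih_set_fwfault_debug() and propagates the new value
 * to every adapter on mpt3sas_ioc_list under gioc_lock.
 */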

/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() for max three times if it gets zero value
 * while reading the system interface register.
 */
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
{
        u32 i = 0, ret_val;

        do {
                ret_val = readl(addr);
                i++;
        } while (ret_val == 0 && i < 3);

        return ret_val;
}

static inline u32
_base_readl(const volatile void __iomem *addr)
{
        return readl(addr);
}

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *                                in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
                u32 index)
{
        /*
         * 256 is offset within sys register.
         * 256 offset MPI frame starts. Max MPI frame supported is 32.
         * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
         */
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
                        MPI_FRAME_START_OFFSET +
                        (cmd_credit * ioc->request_sz) + (index * sizeof(u32));

        writel(reply, reply_free_iomem);
}
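
/*
 * Worked example of the offset math above, using the numbers from the
 * BAR0 layout comment in _clone_sg_entries() (cmd_credit = 32 and
 * request_sz = 128):
 *
 *     reply_free_iomem = BAR0 + 256 + (32 * 128) + (index * 4)
 *                      = BAR0 + 4352 + (index * 4)
 *
 * i.e. the write lands in the 4352-4864 reply free pool region.
 */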

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *                              to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)src;

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                                (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)(src);

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                        (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *                       for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                u8 sge_chain_count)
{
        void __iomem *base_chain, *chain_virt;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_virt;
}
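
/*
 * Illustration of the math above, again with the BAR0 layout comment's
 * numbers (cmd_credit = 32, request_sz = 128) and assuming
 * REPLY_FREE_POOL_SIZE covers the 512-byte reply free pool shown there:
 *
 *     base_chain = BAR0 + 256 + (32 * 128) + 512 = BAR0 + 4864
 *
 * Each smid then owns MaxChainDepth consecutive request_sz-sized chain
 * frames from that base, and sge_chain_count indexes within them.
 */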

/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *                      in BAR0 for scatter gather chains, for
 *                      the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                u8 sge_chain_count)
{
        phys_addr_t base_chain_phys, chain_phys;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *                      buffer address for the provided smid.
 *                      (Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */

static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        // Added extra 1 to reach end of chain.
        void __iomem *chain_end = _base_get_chain(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end + (smid * 64 * 1024);
}
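
/*
 * Illustration: with the BAR0 layout comment's numbers (a chain region
 * of 32 commands * 3 chains * 128 bytes = 12288 bytes starting at 4864),
 * the chain region ends at 4864 + 12288 = 17152, so smid N's 64K host
 * buffer begins at BAR0 + 17152 + (N * 64K).
 */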

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *              Host buffer Physical address for the provided smid.
 *              (Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end_phys + (smid * 64 * 1024);
}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
 *                      lookup list and Provides chain_buffer
 *                      address for the matching dma address.
 *                      (Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer. Or Null on Failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
                dma_addr_t chain_buffer_dma)
{
        u16 index, j;
        struct chain_tracker *ct;

        for (index = 0; index < ioc->scsiio_depth; index++) {
                for (j = 0; j < ioc->chains_needed_per_io; j++) {
                        ct = &ioc->chain_lookup[index].chains_per_smid[j];
                        if (ct && ct->chain_buffer_dma == chain_buffer_dma)
                                return ct->chain_buffer;
                }
        }
        ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
        return NULL;
}

/**
 * _clone_sg_entries -  MPI EP's scsiio and config requests
 *                      are handled here. Base function for
 *                      double buffering, before submitting
 *                      the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
                void *mpi_request, u16 smid)
{
        Mpi2SGESimple32_t *sgel, *sgel_next;
        u32  sgl_flags, sge_chain_count = 0;
        bool is_write = false;
        u16 i = 0;
        void __iomem *buffer_iomem;
        phys_addr_t buffer_iomem_phys;
        void __iomem *buff_ptr;
        phys_addr_t buff_ptr_phys;
        void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        phys_addr_t dst_addr_phys;
        MPI2RequestHeader_t *request_hdr;
        struct scsi_cmnd *scmd;
        struct scatterlist *sg_scmd = NULL;
        int is_scsiio_req = 0;

        request_hdr = (MPI2RequestHeader_t *) mpi_request;

        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
                Mpi25SCSIIORequest_t *scsiio_request =
                        (Mpi25SCSIIORequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
                is_scsiio_req = 1;
        } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
                Mpi2ConfigRequest_t  *config_req =
                        (Mpi2ConfigRequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
        } else
                return;

        /* From the smid we can get the scsi_cmnd; once we have sg_scmd,
         * we just need sg_virt and sg_next to get the virtual
         * address associated with sgel->Address.
         */

        if (is_scsiio_req) {
                /* Get scsi_cmd using smid */
                scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
                if (scmd == NULL) {
                        ioc_err(ioc, "scmd is NULL\n");
                        return;
                }

                /* Get sg_scmd from scmd provided */
                sg_scmd = scsi_sglist(scmd);
        }

        /*
         * 0 - 255      System register
         * 256 - 4352   MPI Frame. (This is based on maxCredit 32)
         * 4352 - 4864  Reply_free pool (512 byte is reserved
         *              considering maxCredit 32. Reply need extra
         *              room, for mCPU case kept four times of
         *              maxCredit).
         * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
         *              128 byte size = 12288)
         * 17152 - x    Host buffer mapped with smid.
         *              (Each smid can have 64K Max IO.)
         * BAR0+Last 1K MSIX Addr and Data
         * Total size in use 2113664 bytes of 4MB BAR0
         */

        buffer_iomem = _base_get_buffer_bar0(ioc, smid);
        buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

        buff_ptr = buffer_iomem;
        buff_ptr_phys = buffer_iomem_phys;
        WARN_ON(buff_ptr_phys > U32_MAX);

        if (le32_to_cpu(sgel->FlagsLength) &
                        (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
                is_write = true;

        for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

                sgl_flags =
                    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

                switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
                case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
                        /*
                         * Helper function which on passing
                         * chain_buffer_dma returns chain_buffer. Get
                         * the virtual address for sgel->Address
                         */
                        sgel_next =
                                _base_get_chain_buffer_dma_to_chain_buffer(ioc,
                                                le32_to_cpu(sgel->Address));
                        if (sgel_next == NULL)
                                return;
                        /*
                         * This is copying a 128 byte chain
                         * frame (not a host buffer)
                         */
                        dst_chain_addr[sge_chain_count] =
                                _base_get_chain(ioc,
                                        smid, sge_chain_count);
                        src_chain_addr[sge_chain_count] =
                                                (void *) sgel_next;
                        dst_addr_phys = _base_get_chain_phys(ioc,
                                                smid, sge_chain_count);
                        WARN_ON(dst_addr_phys > U32_MAX);
                        sgel->Address =
                                cpu_to_le32(lower_32_bits(dst_addr_phys));
                        sgel = sgel_next;
                        sge_chain_count++;
                        break;
                case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
                        if (is_write) {
                                if (is_scsiio_req) {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            sg_virt(sg_scmd),
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        /*
                                         * FIXME: this relies on a zero
                                         * PCI mem_offset.
                                         */
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                } else {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            ioc->config_vaddr,
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                }
                        }
                        buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        if ((le32_to_cpu(sgel->FlagsLength) &
                            (MPI2_SGE_FLAGS_END_OF_BUFFER
                                        << MPI2_SGE_FLAGS_SHIFT)))
                                goto eob_clone_chain;
                        else {
                                /*
                                 * Every single element in MPT will have an
                                 * associated sg_next. Better to sanity-check
                                 * that sg_next is not NULL; it would be a
                                 * bug if it were.
                                 */
                                if (is_scsiio_req) {
                                        sg_scmd = sg_next(sg_scmd);
                                        if (sg_scmd)
                                                sgel++;
                                        else
                                                goto eob_clone_chain;
                                }
                        }
                        break;
                }
        }

eob_clone_chain:
        for (i = 0; i < sge_chain_count; i++) {
                if (is_scsiio_req)
                        _base_clone_to_sys_mem(dst_chain_addr[i],
                                src_chain_addr[i], ioc->request_sz);
        }
}

/**
 *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
        struct pci_dev *pdev;

        if (!ioc)
                return -1;

        pdev = ioc->pdev;
        if (!pdev)
                return -1;
        pci_stop_and_remove_bus_device_locked(pdev);
        return 0;
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
        struct MPT3SAS_ADAPTER *ioc =
            container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
        unsigned long    flags;
        u32 doorbell;
        int rc;
        struct task_struct *p;


        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
                        ioc->pci_error_recovery)
                goto rearm_timer;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
                ioc_err(ioc, "SAS host is non-operational !!!!\n");

                /* It may be possible that EEH recovery can resolve some PCI
                 * bus failure issues rather than removing the dead ioc
                 * function once the controller is considered to be in a
                 * non-operational state. So priority is given to EEH
                 * recovery here. If that does not resolve the issue, the
                 * mpt3sas driver will consider the controller
                 * non-operational and remove the dead ioc function.
                 */
                if (ioc->non_operational_loop++ < 5) {
                        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                                                         flags);
                        goto rearm_timer;
                }

                /*
                 * Call the _scsih_flush_pending_cmds callback so that we
                 * flush all pending commands back to the OS. This call is
                 * required to avoid a deadlock at the block layer. A dead
                 * IOC will fail to do a diag reset, and this call is safe
                 * since a dead ioc will never return any command back from
                 * HW.
                 */
                ioc->schedule_dead_ioc_flush_running_cmds(ioc);
                /*
                 * Set remove_host flag early since kernel thread will
                 * take some time to execute.
                 */
                ioc->remove_host = 1;
                /*Remove the Dead Host */
                p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
                    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
                if (IS_ERR(p))
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                                __func__);
                else
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                                __func__);
                return; /* don't rearm timer */
        }

        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
                u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
                    ioc->manu_pg11.CoreDumpTOSec :
                    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

                timeout /= (FAULT_POLLING_INTERVAL/1000);

                if (ioc->ioc_coredump_loop == 0) {
                        mpt3sas_print_coredump_info(ioc,
                            doorbell & MPI2_DOORBELL_DATA_MASK);
                        /* do not accept any IOs and disable the interrupts */
                        spin_lock_irqsave(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        ioc->shost_recovery = 1;
                        spin_unlock_irqrestore(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        _base_mask_interrupts(ioc);
                        _base_clear_outstanding_commands(ioc);
                }

                ioc_info(ioc, "%s: CoreDump loop %d.",
                    __func__, ioc->ioc_coredump_loop);

                /* Wait until CoreDump completes or times out */
                if (ioc->ioc_coredump_loop++ < timeout) {
                        spin_lock_irqsave(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        goto rearm_timer;
                }
        }

        if (ioc->ioc_coredump_loop) {
                if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
                        ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
                            __func__, ioc->ioc_coredump_loop);
                else
                        ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
                            __func__, ioc->ioc_coredump_loop);
                ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
        }
        ioc->non_operational_loop = 0;
        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                ioc_warn(ioc, "%s: hard reset: %s\n",
                         __func__, rc == 0 ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
                if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                        mpt3sas_print_fault_code(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
                    MPI2_IOC_STATE_COREDUMP)
                        mpt3sas_print_coredump_info(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
                    MPI2_IOC_STATE_OPERATIONAL)
                        return; /* don't rearm timer */
        }
        ioc->ioc_coredump_loop = 0;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
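
/*
 * Worked example of the CoreDump wait above: with a hypothetical
 * CoreDumpTOSec of 15 and FAULT_POLLING_INTERVAL of 1000 ms, timeout
 * becomes 15 / (1000/1000) = 15, i.e. the work item re-arms itself up
 * to 15 times (once per second) before declaring the CoreDump timed out.
 */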

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long    flags;

        if (ioc->fault_reset_work_q)
                return;

        /* initialize fault polling */

        INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
        snprintf(ioc->fault_reset_work_q_name,
            sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
            ioc->driver_name, ioc->id);
        ioc->fault_reset_work_q =
                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
        if (!ioc->fault_reset_work_q) {
                ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
                return;
        }
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;
        struct workqueue_struct *wq;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        wq = ioc->fault_reset_work_q;
        ioc->fault_reset_work_q = NULL;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        if (wq) {
                if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
                        flush_workqueue(wq);
                destroy_workqueue(wq);
        }
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
{
        ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return nothing.
 */
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
 * completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
                const char *caller)
{
        u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
                        ioc->manu_pg11.CoreDumpTOSec :
                        MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

        int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
                                        timeout);

        if (ioc_state)
                ioc_err(ioc,
                    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
                    caller, ioc_state);
        else
                ioc_info(ioc,
                    "%s: CoreDump completed. (ioc_state=0x%x)\n",
                    caller, ioc_state);

        return ioc_state;
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * both driver and firmware stopped, the end user can then
 * obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
        u32 doorbell;

        if (!ioc->fwfault_debug)
                return;

        dump_stack();

        doorbell = ioc->base_readl(&ioc->chip->Doorbell);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                mpt3sas_print_fault_code(ioc, doorbell &
                    MPI2_DOORBELL_DATA_MASK);
        } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
            MPI2_IOC_STATE_COREDUMP) {
                mpt3sas_print_coredump_info(ioc, doorbell &
                    MPI2_DOORBELL_DATA_MASK);
        } else {
                writel(0xC0FFEE00, &ioc->chip->Doorbell);
                ioc_err(ioc, "Firmware is halted due to command timeout\n");
        }

        if (ioc->fwfault_debug == 2)
                for (;;)
                        ;
        else
                panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
        MPI2RequestHeader_t *request_hdr)
{
        u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        char *desc = NULL;
        u16 frame_sz;
        char *func_str = NULL;

        /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
            request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
            request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
                return;

        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
                return;

        switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

        case MPI2_IOCSTATUS_INVALID_FUNCTION:
                desc = "invalid function";
                break;
        case MPI2_IOCSTATUS_BUSY:
                desc = "busy";
                break;
        case MPI2_IOCSTATUS_INVALID_SGL:
                desc = "invalid sgl";
                break;
        case MPI2_IOCSTATUS_INTERNAL_ERROR:
                desc = "internal error";
                break;
        case MPI2_IOCSTATUS_INVALID_VPID:
                desc = "invalid vpid";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
                desc = "insufficient resources";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
                desc = "insufficient power";
                break;
        case MPI2_IOCSTATUS_INVALID_FIELD:
                desc = "invalid field";
                break;
        case MPI2_IOCSTATUS_INVALID_STATE:
                desc = "invalid state";
                break;
        case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
                desc = "op state not supported";
                break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

        case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
                desc = "config invalid action";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
                desc = "config invalid type";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
                desc = "config invalid page";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
                desc = "config invalid data";
                break;
        case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
                desc = "config no defaults";
                break;
        case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
                desc = "config cant commit";
                break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

        case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
        case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
        case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
        case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
        case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
        case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
        case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
        case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
                break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

        case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
                desc = "eedp guard error";
                break;
        case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
                desc = "eedp ref tag error";
                break;
        case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
                desc = "eedp app tag error";
                break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

        case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
                desc = "target invalid io index";
                break;
        case MPI2_IOCSTATUS_TARGET_ABORTED:
                desc = "target aborted";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
                desc = "target no conn retryable";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
                desc = "target no connection";
                break;
        case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
                desc = "target xfer count mismatch";
                break;
        case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
                desc = "target data offset error";
                break;
        case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
                desc = "target too much write data";
                break;
        case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
                desc = "target iu too short";
                break;
        case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
                desc = "target ack nak timeout";
                break;
        case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
                desc = "target nak received";
                break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

        case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
                desc = "smp request failed";
                break;
        case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
                desc = "smp data overrun";
                break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

        case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
                desc = "diagnostic released";
                break;
        default:
                break;
        }

        if (!desc)
                return;

        switch (request_hdr->Function) {
        case MPI2_FUNCTION_CONFIG:
                frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
                func_str = "config_page";
                break;
        case MPI2_FUNCTION_SCSI_TASK_MGMT:
                frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
                func_str = "task_mgmt";
                break;
        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
                frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
                func_str = "sas_iounit_ctl";
                break;
        case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
                frame_sz = sizeof(Mpi2SepRequest_t);
                func_str = "enclosure";
                break;
        case MPI2_FUNCTION_IOC_INIT:
                frame_sz = sizeof(Mpi2IOCInitRequest_t);
                func_str = "ioc_init";
                break;
        case MPI2_FUNCTION_PORT_ENABLE:
                frame_sz = sizeof(Mpi2PortEnableRequest_t);
                func_str = "port_enable";
                break;
        case MPI2_FUNCTION_SMP_PASSTHROUGH:
                frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
                func_str = "smp_passthru";
                break;
        case MPI2_FUNCTION_NVME_ENCAPSULATED:
                frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
                    ioc->sge_size;
                func_str = "nvme_encapsulated";
                break;
        default:
                frame_sz = 32;
                func_str = "unknown";
                break;
        }

        ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
                 desc, ioc_status, request_hdr, func_str);

        _debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        Mpi2EventNotificationReply_t *mpi_reply)
{
        char *desc = NULL;
        u16 event;

        if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
                return;

        event = le16_to_cpu(mpi_reply->Event);

        switch (event) {
        case MPI2_EVENT_LOG_DATA:
                desc = "Log Data";
                break;
        case MPI2_EVENT_STATE_CHANGE:
                desc = "Status Change";
                break;
        case MPI2_EVENT_HARD_RESET_RECEIVED:
                desc = "Hard Reset Received";
                break;
        case MPI2_EVENT_EVENT_CHANGE:
                desc = "Event Change";
                break;
        case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
                desc = "Device Status Change";
                break;
        case MPI2_EVENT_IR_OPERATION_STATUS:
                if (!ioc->hide_ir_msg)
                        desc = "IR Operation Status";
                break;
        case MPI2_EVENT_SAS_DISCOVERY:
        {
                Mpi2EventDataSasDiscovery_t *event_data =
                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
                ioc_info(ioc, "Discovery: (%s)",
                         event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
                         "start" : "stop");
                if (event_data->DiscoveryStatus)
                        pr_cont(" discovery_status(0x%08x)",
                            le32_to_cpu(event_data->DiscoveryStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
                desc = "SAS Broadcast Primitive";
                break;
        case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
                desc = "SAS Init Device Status Change";
                break;
        case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
                desc = "SAS Init Table Overflow";
                break;
        case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
                desc = "SAS Topology Change List";
                break;
        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
                desc = "SAS Enclosure Device Status Change";
                break;
        case MPI2_EVENT_IR_VOLUME:
                if (!ioc->hide_ir_msg)
                        desc = "IR Volume";
                break;
        case MPI2_EVENT_IR_PHYSICAL_DISK:
                if (!ioc->hide_ir_msg)
                        desc = "IR Physical Disk";
                break;
        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
                if (!ioc->hide_ir_msg)
                        desc = "IR Configuration Change List";
                break;
        case MPI2_EVENT_LOG_ENTRY_ADDED:
                if (!ioc->hide_ir_msg)
                        desc = "Log Entry Added";
                break;
        case MPI2_EVENT_TEMP_THRESHOLD:
                desc = "Temperature Threshold";
                break;
        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
                desc = "Cable Event";
                break;
        case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
                desc = "SAS Device Discovery Error";
                break;
        case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
                desc = "PCIE Device Status Change";
                break;
        case MPI2_EVENT_PCIE_ENUMERATION:
        {
                Mpi26EventDataPCIeEnumeration_t *event_data =
                        (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
                ioc_info(ioc, "PCIE Enumeration: (%s)",
                         event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
                         "start" : "stop");
                if (event_data->EnumerationStatus)
                        pr_cont("enumeration_status(0x%08x)",
                                le32_to_cpu(event_data->EnumerationStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
                desc = "PCIE Topology Change List";
                break;
        }

        if (!desc)
                return;

        ioc_info(ioc, "%s\n", desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
{
        union loginfo_type {
                u32     loginfo;
                struct {
                        u32     subcode:16;
                        u32     code:8;
                        u32     originator:4;
                        u32     bus_type:4;
                } dw;
        };
        union loginfo_type sas_loginfo;
        char *originator_str = NULL;

        sas_loginfo.loginfo = log_info;
        if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
                return;

        /* each nexus loss loginfo */
        if (log_info == 0x31170000)
                return;

        /* eat the loginfos associated with task aborts */
        if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
            0x31140000 || log_info == 0x31130000))
                return;

        switch (sas_loginfo.dw.originator) {
        case 0:
                originator_str = "IOP";
                break;
        case 1:
                originator_str = "PL";
                break;
        case 2:
                if (!ioc->hide_ir_msg)
                        originator_str = "IR";
                else
                        originator_str = "WarpDrive";
                break;
        }

        ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
                 log_info,
                 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
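
/*
 * Example decode using the union above: log_info 0x31170000 (the nexus
 * loss loginfo filtered out early in _base_sas_log_info()) splits into
 *
 *     bus_type   = 0x3 (SAS)
 *     originator = 0x1 (PL)
 *     code       = 0x17
 *     subcode    = 0x0000
 *
 * Note this bit-field decode assumes a little-endian host.
 */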

/**
 * _base_display_reply_info - translate and display a reply's ioc_status
 *                            and log info
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;
        u16 ioc_status;
        u32 loginfo = 0;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (unlikely(!mpi_reply)) {
                ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
                        __FILE__, __LINE__, __func__);
                return;
        }
        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

        if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
            (ioc->logging_level & MPT_DEBUG_REPLY)) {
                _base_sas_ioc_info(ioc , mpi_reply,
                   mpt3sas_base_get_msg_frame(ioc, smid));
        }

        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
                loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
                _base_sas_log_info(ioc, loginfo);
        }

        if (ioc_status || loginfo) {
                ioc_status &= MPI2_IOCSTATUS_MASK;
                mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
        }
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
                return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

        if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
                return 1;

        ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
        if (mpi_reply) {
                ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
                memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
        }
        ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

        complete(&ioc->base_cmds.done);
        return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
1363 static u8
1364 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1365 {
1366         Mpi2EventNotificationReply_t *mpi_reply;
1367         Mpi2EventAckRequest_t *ack_request;
1368         u16 smid;
1369         struct _event_ack_list *delayed_event_ack;
1370
1371         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1372         if (!mpi_reply)
1373                 return 1;
1374         if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
1375                 return 1;
1376
1377         _base_display_event_data(ioc, mpi_reply);
1378
1379         if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
1380                 goto out;
1381         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1382         if (!smid) {
1383                 delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
1384                                         GFP_ATOMIC);
1385                 if (!delayed_event_ack)
1386                         goto out;
1387                 INIT_LIST_HEAD(&delayed_event_ack->list);
1388                 delayed_event_ack->Event = mpi_reply->Event;
1389                 delayed_event_ack->EventContext = mpi_reply->EventContext;
1390                 list_add_tail(&delayed_event_ack->list,
1391                                 &ioc->delayed_event_ack_list);
1392                 dewtprintk(ioc,
1393                            ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1394                                     le16_to_cpu(mpi_reply->Event)));
1395                 goto out;
1396         }
1397
1398         ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1399         memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1400         ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1401         ack_request->Event = mpi_reply->Event;
1402         ack_request->EventContext = mpi_reply->EventContext;
1403         ack_request->VF_ID = 0;  /* TODO */
1404         ack_request->VP_ID = 0;
1405         ioc->put_smid_default(ioc, smid);
1406
1407  out:
1408
1409         /* scsih callback handler */
1410         mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1411
1412         /* ctl callback handler */
1413         mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1414
1415         return 1;
1416 }
1417
1418 static struct scsiio_tracker *
1419 _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1420 {
1421         struct scsi_cmnd *cmd;
1422
1423         if (WARN_ON(!smid) ||
1424             WARN_ON(smid >= ioc->hi_priority_smid))
1425                 return NULL;
1426
1427         cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1428         if (cmd)
1429                 return scsi_cmd_priv(cmd);
1430
1431         return NULL;
1432 }
1433
1434 /**
1435  * _base_get_cb_idx - obtain the callback index
1436  * @ioc: per adapter object
1437  * @smid: system request message index
1438  *
1439  * Return: callback index.
1440  */
1441 static u8
1442 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1443 {
1444         int i;
1445         u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1446         u8 cb_idx = 0xFF;
1447
1448         if (smid < ioc->hi_priority_smid) {
1449                 struct scsiio_tracker *st;
1450
1451                 if (smid < ctl_smid) {
1452                         st = _get_st_from_smid(ioc, smid);
1453                         if (st)
1454                                 cb_idx = st->cb_idx;
1455                 } else if (smid == ctl_smid)
1456                         cb_idx = ioc->ctl_cb_idx;
1457         } else if (smid < ioc->internal_smid) {
1458                 i = smid - ioc->hi_priority_smid;
1459                 cb_idx = ioc->hpr_lookup[i].cb_idx;
1460         } else if (smid <= ioc->hba_queue_depth) {
1461                 i = smid - ioc->internal_smid;
1462                 cb_idx = ioc->internal_lookup[i].cb_idx;
1463         }
1464         return cb_idx;
1465 }
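/*
 * Illustrative map of the smid regions decoded above (the depths are made-up
 * example values): with a scsiio_depth of 300 and INTERNAL_SCSIIO_CMDS_COUNT
 * of 3, ctl_smid is 298; smids 1..297 are SCSI IO and resolve through the
 * per-command tracker, smid 298 is reserved for the ctl module, the
 * hi-priority and internal regions follow above scsiio_depth, and anything
 * else leaves cb_idx at 0xFF.
 */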
1466
1467 /**
1468  * _base_mask_interrupts - disable interrupts
1469  * @ioc: per adapter object
1470  *
1471  * Disabling ResetIRQ, Reply and Doorbell Interrupts
1472  */
1473 static void
1474 _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1475 {
1476         u32 him_register;
1477
1478         ioc->mask_interrupts = 1;
1479         him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1480         him_register |= MPI2_HIM_DIM | MPI2_HIM_RIM | MPI2_HIM_RESET_IRQ_MASK;
1481         writel(him_register, &ioc->chip->HostInterruptMask);
1482         ioc->base_readl(&ioc->chip->HostInterruptMask);
1483 }
1484
1485 /**
1486  * _base_unmask_interrupts - enable interrupts
1487  * @ioc: per adapter object
1488  *
1489  * Enabling only Reply Interrupts
1490  */
1491 static void
1492 _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1493 {
1494         u32 him_register;
1495
1496         him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1497         him_register &= ~MPI2_HIM_RIM;
1498         writel(him_register, &ioc->chip->HostInterruptMask);
1499         ioc->mask_interrupts = 0;
1500 }
1501
1502 union reply_descriptor {
1503         u64 word;
1504         struct {
1505                 u32 low;
1506                 u32 high;
1507         } u;
1508 };
1509
1510 static u32 base_mod64(u64 dividend, u32 divisor)
1511 {
1512         u32 remainder;
1513
1514         if (!divisor)
1515                 pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
1516         remainder = do_div(dividend, divisor);
1517         return remainder;
1518 }
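/*
 * Why base_mod64() exists: on 32-bit kernels a 64-bit '%' is unavailable, so
 * do_div() divides in place and hands back the remainder.  A worked example
 * with the threshold logic below in mind:
 *
 *	base_mod64(10000ULL, 64) == 16	(10000 == 156 * 64 + 16)
 *
 * i.e. when ioc->thresh_hold is 64, the reply loop refreshes the host index
 * once every 64 processed descriptors.
 */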
1519
1520 /**
1521  * _base_process_reply_queue - Process reply descriptors from reply
1522  *              descriptor post queue.
1523  * @reply_q: per IRQ's reply queue object.
1524  *
1525  * Return: number of reply descriptors processed from reply
1526  *              descriptor queue.
1527  */
1528 static int
1529 _base_process_reply_queue(struct adapter_reply_queue *reply_q)
1530 {
1531         union reply_descriptor rd;
1532         u64 completed_cmds;
1533         u8 request_descript_type;
1534         u16 smid;
1535         u8 cb_idx;
1536         u32 reply;
1537         u8 msix_index = reply_q->msix_index;
1538         struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1539         Mpi2ReplyDescriptorsUnion_t *rpf;
1540         u8 rc;
1541
1542         completed_cmds = 0;
1543         if (!atomic_add_unless(&reply_q->busy, 1, 1))
1544                 return completed_cmds;
1545
1546         rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1547         request_descript_type = rpf->Default.ReplyFlags
1548              & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1549         if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1550                 atomic_dec(&reply_q->busy);
1551                 return completed_cmds;
1552         }
1553
1554         cb_idx = 0xFF;
1555         do {
1556                 rd.word = le64_to_cpu(rpf->Words);
1557                 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1558                         goto out;
1559                 reply = 0;
1560                 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1561                 if (request_descript_type ==
1562                     MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1563                     request_descript_type ==
1564                     MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1565                     request_descript_type ==
1566                     MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1567                         cb_idx = _base_get_cb_idx(ioc, smid);
1568                         if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1569                             (likely(mpt_callbacks[cb_idx] != NULL))) {
1570                                 rc = mpt_callbacks[cb_idx](ioc, smid,
1571                                     msix_index, 0);
1572                                 if (rc)
1573                                         mpt3sas_base_free_smid(ioc, smid);
1574                         }
1575                 } else if (request_descript_type ==
1576                     MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1577                         reply = le32_to_cpu(
1578                             rpf->AddressReply.ReplyFrameAddress);
1579                         if (reply > ioc->reply_dma_max_address ||
1580                             reply < ioc->reply_dma_min_address)
1581                                 reply = 0;
1582                         if (smid) {
1583                                 cb_idx = _base_get_cb_idx(ioc, smid);
1584                                 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1585                                     (likely(mpt_callbacks[cb_idx] != NULL))) {
1586                                         rc = mpt_callbacks[cb_idx](ioc, smid,
1587                                             msix_index, reply);
1588                                         if (reply)
1589                                                 _base_display_reply_info(ioc,
1590                                                     smid, msix_index, reply);
1591                                         if (rc)
1592                                                 mpt3sas_base_free_smid(ioc,
1593                                                     smid);
1594                                 }
1595                         } else {
1596                                 _base_async_event(ioc, msix_index, reply);
1597                         }
1598
1599                         /* reply free queue handling */
1600                         if (reply) {
1601                                 ioc->reply_free_host_index =
1602                                     (ioc->reply_free_host_index ==
1603                                     (ioc->reply_free_queue_depth - 1)) ?
1604                                     0 : ioc->reply_free_host_index + 1;
1605                                 ioc->reply_free[ioc->reply_free_host_index] =
1606                                     cpu_to_le32(reply);
1607                                 if (ioc->is_mcpu_endpoint)
1608                                         _base_clone_reply_to_sys_mem(ioc,
1609                                                 reply,
1610                                                 ioc->reply_free_host_index);
1611                                 writel(ioc->reply_free_host_index,
1612                                     &ioc->chip->ReplyFreeHostIndex);
1613                         }
1614                 }
1615
1616                 rpf->Words = cpu_to_le64(ULLONG_MAX);
1617                 reply_q->reply_post_host_index =
1618                     (reply_q->reply_post_host_index ==
1619                     (ioc->reply_post_queue_depth - 1)) ? 0 :
1620                     reply_q->reply_post_host_index + 1;
1621                 request_descript_type =
1622                     reply_q->reply_post_free[reply_q->reply_post_host_index].
1623                     Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1624                 completed_cmds++;
1625                 /* Update the reply post host index after continuously
1626                  * processing the threshold number of Reply Descriptors,
1627                  * so that the firmware can find enough free entries in
1628                  * the reply descriptor post queue to post more of them.
1629                  */
1630                 if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
1631                         if (ioc->combined_reply_queue) {
1632                                 writel(reply_q->reply_post_host_index |
1633                                                 ((msix_index  & 7) <<
1634                                                  MPI2_RPHI_MSIX_INDEX_SHIFT),
1635                                     ioc->replyPostRegisterIndex[msix_index/8]);
1636                         } else {
1637                                 writel(reply_q->reply_post_host_index |
1638                                                 (msix_index <<
1639                                                  MPI2_RPHI_MSIX_INDEX_SHIFT),
1640                                                 &ioc->chip->ReplyPostHostIndex);
1641                         }
1642                         if (!reply_q->irq_poll_scheduled) {
1643                                 reply_q->irq_poll_scheduled = true;
1644                                 irq_poll_sched(&reply_q->irqpoll);
1645                         }
1646                         atomic_dec(&reply_q->busy);
1647                         return completed_cmds;
1648                 }
1649                 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1650                         goto out;
1651                 if (!reply_q->reply_post_host_index)
1652                         rpf = reply_q->reply_post_free;
1653                 else
1654                         rpf++;
1655         } while (1);
1656
1657  out:
1658
1659         if (!completed_cmds) {
1660                 atomic_dec(&reply_q->busy);
1661                 return completed_cmds;
1662         }
1663
1664         if (ioc->is_warpdrive) {
1665                 writel(reply_q->reply_post_host_index,
1666                 ioc->reply_post_host_index[msix_index]);
1667                 atomic_dec(&reply_q->busy);
1668                 return completed_cmds;
1669         }
1670
1671         /* Update Reply Post Host Index.
1672          * For those HBAs which support the combined reply queue feature:
1673          * 1. Get the correct Supplemental Reply Post Host Index Register,
1674          *    i.e. the (msix_index / 8)th entry from the Supplemental Reply
1675          *    Post Host Index Register address bank, replyPostRegisterIndex[].
1676          * 2. Then update this register with the new reply host index value
1677          *    in the ReplyPostIndex field and with the MSIxIndex field set to
1678          *    msix_index reduced to a value between 0 and 7 using a modulo 8
1679          *    operation, since each Supplemental Reply Post Host Index
1680          *    Register supports 8 MSI-X vectors.
1681          *
1682          * For other HBAs just update the Reply Post Host Index register with
1683          * the new reply host index value in the ReplyPostIndex field and the
1684          * msix_index value in the MSIxIndex field.
1685          */
1686         if (ioc->combined_reply_queue)
1687                 writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1688                         MPI2_RPHI_MSIX_INDEX_SHIFT),
1689                         ioc->replyPostRegisterIndex[msix_index/8]);
1690         else
1691                 writel(reply_q->reply_post_host_index | (msix_index <<
1692                         MPI2_RPHI_MSIX_INDEX_SHIFT),
1693                         &ioc->chip->ReplyPostHostIndex);
1694         atomic_dec(&reply_q->busy);
1695         return completed_cmds;
1696 }
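/*
 * Worked example of the combined-reply-queue write above: for MSI-X vector
 * 11, the host index is written to supplemental register bank
 * replyPostRegisterIndex[11 / 8] == replyPostRegisterIndex[1] with the
 * MSIxIndex field set to 11 & 7 == 3, since each supplemental register
 * serves 8 vectors.
 */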
1697
1698 /**
1699  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1700  * @irq: irq number (not used)
1701  * @bus_id: cookie == pointer to the adapter_reply_queue structure
1702  *
1703  * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1704  */
1705 static irqreturn_t
1706 _base_interrupt(int irq, void *bus_id)
1707 {
1708         struct adapter_reply_queue *reply_q = bus_id;
1709         struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1710
1711         if (ioc->mask_interrupts)
1712                 return IRQ_NONE;
1713         if (reply_q->irq_poll_scheduled)
1714                 return IRQ_HANDLED;
1715         return ((_base_process_reply_queue(reply_q) > 0) ?
1716                         IRQ_HANDLED : IRQ_NONE);
1717 }
1718
1719 /**
1720  * _base_irqpoll - IRQ poll callback handler
1721  * @irqpoll: irq_poll object
1722  * @budget: irq poll weight
1723  *
1724  * Return: number of reply descriptors processed.
1725  */
1726 static int
1727 _base_irqpoll(struct irq_poll *irqpoll, int budget)
1728 {
1729         struct adapter_reply_queue *reply_q;
1730         int num_entries = 0;
1731
1732         reply_q = container_of(irqpoll, struct adapter_reply_queue,
1733                         irqpoll);
1734         if (reply_q->irq_line_enable) {
1735                 disable_irq(reply_q->os_irq);
1736                 reply_q->irq_line_enable = false;
1737         }
1738         num_entries = _base_process_reply_queue(reply_q);
1739         if (num_entries < budget) {
1740                 irq_poll_complete(irqpoll);
1741                 reply_q->irq_poll_scheduled = false;
1742                 reply_q->irq_line_enable = true;
1743                 enable_irq(reply_q->os_irq);
1744         }
1745
1746         return num_entries;
1747 }
1748
1749 /**
1750  * _base_init_irqpolls - initialize IRQ polls
1751  * @ioc: per adapter object
1752  *
1753  * Return: nothing.
1754  */
1755 static void
1756 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1757 {
1758         struct adapter_reply_queue *reply_q, *next;
1759
1760         if (list_empty(&ioc->reply_queue_list))
1761                 return;
1762
1763         list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1764                 irq_poll_init(&reply_q->irqpoll,
1765                         ioc->hba_queue_depth/4, _base_irqpoll);
1766                 reply_q->irq_poll_scheduled = false;
1767                 reply_q->irq_line_enable = true;
1768                 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1769                     reply_q->msix_index);
1770         }
1771 }
1772
1773 /**
1774  * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1775  * @ioc: per adapter object
1776  *
1777  * Return: Whether or not MSI/X is enabled.
1778  */
1779 static inline int
1780 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1781 {
1782         return (ioc->facts.IOCCapabilities &
1783             MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1784 }
1785
1786 /**
1787  * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1788  * @ioc: per adapter object
1789  * Context: non-ISR context.
1790  *
1791  * Called when a Task Management request has completed.
1792  */
1793 void
1794 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
1795 {
1796         struct adapter_reply_queue *reply_q;
1797
1798         /* If MSIX capability is turned off
1799          * then multi-queues are not enabled
1800          */
1801         if (!_base_is_controller_msix_enabled(ioc))
1802                 return;
1803
1804         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1805                 if (ioc->shost_recovery || ioc->remove_host ||
1806                                 ioc->pci_error_recovery)
1807                         return;
1808                 /* TMs are on msix_index == 0 */
1809                 if (reply_q->msix_index == 0)
1810                         continue;
1811                 if (reply_q->irq_poll_scheduled) {
1812                         /* Calling irq_poll_disable will wait for any pending
1813                          * callbacks to have completed.
1814                          */
1815                         irq_poll_disable(&reply_q->irqpoll);
1816                         irq_poll_enable(&reply_q->irqpoll);
1817                         reply_q->irq_poll_scheduled = false;
1818                         reply_q->irq_line_enable = true;
1819                         enable_irq(reply_q->os_irq);
1820                         continue;
1821                 }
1822                 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1823         }
1824 }
1825
1826 /**
1827  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1828  * @cb_idx: callback index
1829  */
1830 void
1831 mpt3sas_base_release_callback_handler(u8 cb_idx)
1832 {
1833         mpt_callbacks[cb_idx] = NULL;
1834 }
1835
1836 /**
1837  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1838  * @cb_func: callback function
1839  *
1840  * Return: Index of @cb_func.
1841  */
1842 u8
1843 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1844 {
1845         u8 cb_idx;
1846
1847         for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1848                 if (mpt_callbacks[cb_idx] == NULL)
1849                         break;
1850
1851         mpt_callbacks[cb_idx] = cb_func;
1852         return cb_idx;
1853 }
1854
1855 /**
1856  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1857  */
1858 void
1859 mpt3sas_base_initialize_callback_handler(void)
1860 {
1861         u8 cb_idx;
1862
1863         for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1864                 mpt3sas_base_release_callback_handler(cb_idx);
1865 }
1866
1867
1868 /**
1869  * _base_build_zero_len_sge - build zero length sg entry
1870  * @ioc: per adapter object
1871  * @paddr: virtual address for SGE
1872  *
1873  * Create a zero length scatter gather entry to ensure the IOC's hardware has
1874  * something to use if the target device goes brain dead and tries
1875  * to send data even when none is asked for.
1876  */
1877 static void
1878 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1879 {
1880         u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1881             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1882             MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1883             MPI2_SGE_FLAGS_SHIFT);
1884         ioc->base_add_sg_single(paddr, flags_length, -1);
1885 }
1886
1887 /**
1888  * _base_add_sg_single_32 - Place a simple 32 bit SGE at address paddr.
1889  * @paddr: virtual address for SGE
1890  * @flags_length: SGE flags and data transfer length
1891  * @dma_addr: Physical address
1892  */
1893 static void
1894 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1895 {
1896         Mpi2SGESimple32_t *sgel = paddr;
1897
1898         flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1899             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1900         sgel->FlagsLength = cpu_to_le32(flags_length);
1901         sgel->Address = cpu_to_le32(dma_addr);
1902 }
1903
1904
1905 /**
1906  * _base_add_sg_single_64 - Place a simple 64 bit SGE at address paddr.
1907  * @paddr: virtual address for SGE
1908  * @flags_length: SGE flags and data transfer length
1909  * @dma_addr: Physical address
1910  */
1911 static void
1912 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1913 {
1914         Mpi2SGESimple64_t *sgel = paddr;
1915
1916         flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1917             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1918         sgel->FlagsLength = cpu_to_le32(flags_length);
1919         sgel->Address = cpu_to_le64(dma_addr);
1920 }
1921
1922 /**
1923  * _base_get_chain_buffer_tracker - obtain chain tracker
1924  * @ioc: per adapter object
1925  * @scmd: SCSI command of the IO request
1926  *
1927  * Return: chain tracker from the chain_lookup table, keyed by the
1928  * command's smid and that smid's current chain_offset.
1929  */
1930 static struct chain_tracker *
1931 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1932                                struct scsi_cmnd *scmd)
1933 {
1934         struct chain_tracker *chain_req;
1935         struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1936         u16 smid = st->smid;
1937         u8 chain_offset =
1938            atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
1939
1940         if (chain_offset == ioc->chains_needed_per_io)
1941                 return NULL;
1942
1943         chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
1944         atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
1945         return chain_req;
1946 }
1947
1948
1949 /**
1950  * _base_build_sg - build generic sg
1951  * @ioc: per adapter object
1952  * @psge: virtual address for SGE
1953  * @data_out_dma: physical address for WRITES
1954  * @data_out_sz: data xfer size for WRITES
1955  * @data_in_dma: physical address for READS
1956  * @data_in_sz: data xfer size for READS
1957  */
1958 static void
1959 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1960         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1961         size_t data_in_sz)
1962 {
1963         u32 sgl_flags;
1964
1965         if (!data_out_sz && !data_in_sz) {
1966                 _base_build_zero_len_sge(ioc, psge);
1967                 return;
1968         }
1969
1970         if (data_out_sz && data_in_sz) {
1971                 /* WRITE sgel first */
1972                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1973                     MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1974                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1975                 ioc->base_add_sg_single(psge, sgl_flags |
1976                     data_out_sz, data_out_dma);
1977
1978                 /* incr sgel */
1979                 psge += ioc->sge_size;
1980
1981                 /* READ sgel last */
1982                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1983                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1984                     MPI2_SGE_FLAGS_END_OF_LIST);
1985                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1986                 ioc->base_add_sg_single(psge, sgl_flags |
1987                     data_in_sz, data_in_dma);
1988         } else if (data_out_sz) /* WRITE */ {
1989                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1990                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1991                     MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1992                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1993                 ioc->base_add_sg_single(psge, sgl_flags |
1994                     data_out_sz, data_out_dma);
1995         } else if (data_in_sz) /* READ */ {
1996                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1997                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1998                     MPI2_SGE_FLAGS_END_OF_LIST);
1999                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2000                 ioc->base_add_sg_single(psge, sgl_flags |
2001                     data_in_sz, data_in_dma);
2002         }
2003 }
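/*
 * Note on the encoding used above: MPI2_SGE_FLAGS_SHIFT is 24, so each
 * 32-bit FlagsLength word carries the SGE flags in its top byte and the
 * transfer length in the low 24 bits, which is why the flag constants are
 * shifted before being OR'ed with the byte count, e.g.:
 *
 *	flags_length = (sgl_flags << MPI2_SGE_FLAGS_SHIFT) | data_out_sz;
 */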
2004
2005 /* IEEE format sgls */
2006
2007 /**
2008  * _base_build_nvme_prp - This function is called for NVMe end devices to build
2009  * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
2010  * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
2011  * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
2012  * used to describe a larger data buffer.  If the data buffer is too large to
2013  * describe using the two PRP entries inside the NVMe message, then PRP1
2014  * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2015  * list located elsewhere in memory to describe the remaining data memory
2016  * segments.  The PRP list will be contiguous.
2017  *
2018  * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
2019  * consists of a list of PRP entries to describe a number of noncontiguous
2020  * physical memory segments as a single memory buffer, just as a SGL does.  Note
2021  * however, that this function is only used by the IOCTL call, so the memory
2022  * given will be guaranteed to be contiguous.  There is no need to translate
2023  * non-contiguous SGL into a PRP in this case.  All PRPs will describe
2024  * contiguous space that is one page size each.
2025  *
2026  * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
2027  * a PRP list pointer or a PRP element, depending upon the command.  PRP2
2028  * contains the second PRP element if the memory being described fits within 2
2029  * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2030  *
2031  * A PRP list pointer contains the address of a PRP list, structured as a linear
2032  * array of PRP entries.  Each PRP entry in this list describes a segment of
2033  * physical memory.
2034  *
2035  * Each 64-bit PRP entry comprises an address and an offset field.  The address
2036  * always points at the beginning of a 4KB physical memory page, and the offset
2037  * describes where within that 4KB page the memory segment begins.  Only the
2038  * first element in a PRP list may contain a non-zero offset, implying that all
2039  * memory segments following the first begin at the start of a 4KB page.
2040  *
2041  * Each PRP element normally describes 4KB of physical memory, with exceptions
2042  * for the first and last elements in the list.  If the memory being described
2043  * by the list begins at a non-zero offset within the first 4KB page, then the
2044  * first PRP element will contain a non-zero offset indicating where the region
2045  * begins within the 4KB page.  The last memory segment may end before the end
2046  * of the 4KB segment, depending upon the overall size of the memory being
2047  * described by the PRP list.
2048  *
2049  * Since PRP entries lack any indication of size, the overall data buffer length
2050  * is used to determine where the end of the data memory buffer is located, and
2051  * how many PRP entries are required to describe it.
2052  *
2053  * @ioc: per adapter object
2054  * @smid: system request message index for getting associated SGL
2055  * @nvme_encap_request: the NVMe request msg frame pointer
2056  * @data_out_dma: physical address for WRITES
2057  * @data_out_sz: data xfer size for WRITES
2058  * @data_in_dma: physical address for READS
2059  * @data_in_sz: data xfer size for READS
2060  */
2061 static void
2062 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2063         Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2064         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2065         size_t data_in_sz)
2066 {
2067         int             prp_size = NVME_PRP_SIZE;
2068         __le64          *prp_entry, *prp1_entry, *prp2_entry;
2069         __le64          *prp_page;
2070         dma_addr_t      prp_entry_dma, prp_page_dma, dma_addr;
2071         u32             offset, entry_len;
2072         u32             page_mask_result, page_mask;
2073         size_t          length;
2074         struct mpt3sas_nvme_cmd *nvme_cmd =
2075                 (void *)nvme_encap_request->NVMe_Command;
2076
2077         /*
2078          * Not all commands require a data transfer. If no data, just return
2079          * without constructing any PRP.
2080          */
2081         if (!data_in_sz && !data_out_sz)
2082                 return;
2083         prp1_entry = &nvme_cmd->prp1;
2084         prp2_entry = &nvme_cmd->prp2;
2085         prp_entry = prp1_entry;
2086         /*
2087          * For the PRP entries, use the specially allocated buffer of
2088          * contiguous memory.
2089          */
2090         prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2091         prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2092
2093         /*
2094          * Check if we are within 1 entry of a page boundary; we don't
2095          * want our first entry to be a PRP List entry.
2096          */
2097         page_mask = ioc->page_size - 1;
2098         page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2099         if (!page_mask_result) {
2100                 /* Bump up to next page boundary. */
2101                 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2102                 prp_page_dma = prp_page_dma + prp_size;
2103         }
2104
2105         /*
2106          * Set PRP physical pointer, which initially points to the current PRP
2107          * DMA memory page.
2108          */
2109         prp_entry_dma = prp_page_dma;
2110
2111         /* Get physical address and length of the data buffer. */
2112         if (data_in_sz) {
2113                 dma_addr = data_in_dma;
2114                 length = data_in_sz;
2115         } else {
2116                 dma_addr = data_out_dma;
2117                 length = data_out_sz;
2118         }
2119
2120         /* Loop while the length is not zero. */
2121         while (length) {
2122                 /*
2123                  * Check if we need to put a list pointer here: we do if we
2124                  * are prp_size (8 bytes) short of a page boundary.
2125                  */
2126                 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2127                 if (!page_mask_result) {
2128                         /*
2129                          * This is the last entry in a PRP List, so we need to
2130                          * put a PRP list pointer here.  What this does is:
2131                          *   - bump the current memory pointer to the next
2132                          *     address, which will be the next full page.
2133                          *   - set the PRP Entry to point to that page.  This
2134                          *     is now the PRP List pointer.
2135          *   - bump the PRP Entry pointer to the start of the
2136                          *     next page.  Since all of this PRP memory is
2137                          *     contiguous, no need to get a new page - it's
2138                          *     just the next address.
2139                          */
2140                         prp_entry_dma++;
2141                         *prp_entry = cpu_to_le64(prp_entry_dma);
2142                         prp_entry++;
2143                 }
2144
2145                 /* Handle an entry that covers only part of a page. */
2146                 offset = dma_addr & page_mask;
2147                 entry_len = ioc->page_size - offset;
2148
2149                 if (prp_entry == prp1_entry) {
2150                         /*
2151                          * Must fill in the first PRP pointer (PRP1) before
2152                          * moving on.
2153                          */
2154                         *prp1_entry = cpu_to_le64(dma_addr);
2155
2156                         /*
2157                          * Now point to the second PRP entry within the
2158                          * command (PRP2).
2159                          */
2160                         prp_entry = prp2_entry;
2161                 } else if (prp_entry == prp2_entry) {
2162                         /*
2163                          * Should the PRP2 entry be a PRP List pointer or just
2164                          * a regular PRP pointer?  If there is more than one
2165                          * more page of data, must use a PRP List pointer.
2166                          */
2167                         if (length > ioc->page_size) {
2168                                 /*
2169                                  * PRP2 will contain a PRP List pointer because
2170                                  * more PRP's are needed with this command. The
2171                                  * list will start at the beginning of the
2172                                  * contiguous buffer.
2173                                  */
2174                                 *prp2_entry = cpu_to_le64(prp_entry_dma);
2175
2176                                 /*
2177                                  * The next PRP Entry will be the start of the
2178                                  * first PRP List.
2179                                  */
2180                                 prp_entry = prp_page;
2181                         } else {
2182                                 /*
2183                                  * After this, the PRP Entries are complete.
2184                                  * This command uses 2 PRP's and no PRP list.
2185                                  */
2186                                 *prp2_entry = cpu_to_le64(dma_addr);
2187                         }
2188                 } else {
2189                         /*
2190                          * Put entry in list and bump the addresses.
2191                          *
2192                          * After PRP1 and PRP2 are filled in, this will fill in
2193                          * all remaining PRP entries in a PRP List, one per
2194                          * each time through the loop.
2195                          */
2196                         *prp_entry = cpu_to_le64(dma_addr);
2197                         prp_entry++;
2198                         prp_entry_dma++;
2199                 }
2200
2201                 /*
2202                  * Bump the phys address of the command's data buffer by the
2203                  * entry_len.
2204                  */
2205                 dma_addr += entry_len;
2206
2207                 /* Decrement length accounting for last partial page. */
2208                 if (entry_len > length)
2209                         length = 0;
2210                 else
2211                         length -= entry_len;
2212         }
2213 }
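/*
 * Worked example for the layout built above, assuming the common 4 KiB
 * ioc->page_size: a 20 KiB buffer starting 0x200 bytes into a page needs a
 * first entry of 4096 - 0x200 = 3584 bytes plus
 * DIV_ROUND_UP(20480 - 3584, 4096) = 5 full-page entries, six PRP entries
 * in all.  PRP1 holds the first entry; because more than one page remains
 * after it, PRP2 becomes a list pointer to the contiguous buffer holding
 * the other five.
 */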
2214
2215 /**
2216  * base_make_prp_nvme - prepare PRPs (Physical Region Pages), the SGL
2217  * format specific to NVMe drives only
2218  *
2219  * @ioc:                per adapter object
2220  * @scmd:               SCSI command from the mid-layer
2221  * @mpi_request:        mpi request
2222  * @smid:               msg Index
2223  * @sge_count:          scatter gather element count.
2224  *
2225  * Return:              nothing; the PRP SGL is built in place in the
2226  *                      message frame.
2227  */
2228 static void
2229 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2230                 struct scsi_cmnd *scmd,
2231                 Mpi25SCSIIORequest_t *mpi_request,
2232                 u16 smid, int sge_count)
2233 {
2234         int sge_len, num_prp_in_chain = 0;
2235         Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2236         __le64 *curr_buff;
2237         dma_addr_t msg_dma, sge_addr, offset;
2238         u32 page_mask, page_mask_result;
2239         struct scatterlist *sg_scmd;
2240         u32 first_prp_len;
2241         int data_len = scsi_bufflen(scmd);
2242         u32 nvme_pg_size;
2243
2244         nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2245         /*
2246          * NVMe has a very convoluted PRP format.  One PRP is required
2247          * for each page or partial page.  The driver needs to split up
2248          * OS sg_list entries if they are longer than one page or cross
2249          * a page boundary.  The driver also has to insert a PRP list
2250          * pointer entry as the last entry in each page of the PRP list.
2251          *
2252          * NOTE: The first PRP "entry" is actually placed in the first
2253          * SGL entry in the main message as IEEE 64 format.  The 2nd
2254          * entry in the main message is the chain element, and the rest
2255          * of the PRP entries are built in the contiguous pcie buffer.
2256          */
2257         page_mask = nvme_pg_size - 1;
2258
2259         /*
2260          * Native SGL is needed.
2261          * Put a chain element in main message frame that points to the first
2262          * chain buffer.
2263          *
2264          * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
2265          *        a native SGL.
2266          */
2267
2268         /* Set main message chain element pointer */
2269         main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2270         /*
2271          * For NVMe the chain element needs to be the 2nd SG entry in the main
2272          * message.
2273          */
2274         main_chain_element = (Mpi25IeeeSgeChain64_t *)
2275                 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2276
2277         /*
2278          * For the PRP entries, use the specially allocated buffer of
2279          * contiguous memory.  Normal chain buffers can't be used
2280          * because each chain buffer would need to be the size of an OS
2281          * page (4k).
2282          */
2283         curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2284         msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2285
2286         main_chain_element->Address = cpu_to_le64(msg_dma);
2287         main_chain_element->NextChainOffset = 0;
2288         main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2289                         MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2290                         MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2291
2292         /* Build the first PRP; the SGE need not be page aligned. */
2293         ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2294         sg_scmd = scsi_sglist(scmd);
2295         sge_addr = sg_dma_address(sg_scmd);
2296         sge_len = sg_dma_len(sg_scmd);
2297
2298         offset = sge_addr & page_mask;
2299         first_prp_len = nvme_pg_size - offset;
2300
2301         ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2302         ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2303
2304         data_len -= first_prp_len;
2305
2306         if (sge_len > first_prp_len) {
2307                 sge_addr += first_prp_len;
2308                 sge_len -= first_prp_len;
2309         } else if (data_len && (sge_len == first_prp_len)) {
2310                 sg_scmd = sg_next(sg_scmd);
2311                 sge_addr = sg_dma_address(sg_scmd);
2312                 sge_len = sg_dma_len(sg_scmd);
2313         }
2314
2315         for (;;) {
2316                 offset = sge_addr & page_mask;
2317
2318                 /* Put a PRP list pointer at each page boundary. */
2319                 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2320                 if (unlikely(!page_mask_result)) {
2321                         scmd_printk(KERN_NOTICE,
2322                                 scmd, "page boundary curr_buff: 0x%p\n",
2323                                 curr_buff);
2324                         msg_dma += 8;
2325                         *curr_buff = cpu_to_le64(msg_dma);
2326                         curr_buff++;
2327                         num_prp_in_chain++;
2328                 }
2329
2330                 *curr_buff = cpu_to_le64(sge_addr);
2331                 curr_buff++;
2332                 msg_dma += 8;
2333                 num_prp_in_chain++;
2334
2335                 sge_addr += nvme_pg_size;
2336                 sge_len -= nvme_pg_size;
2337                 data_len -= nvme_pg_size;
2338
2339                 if (data_len <= 0)
2340                         break;
2341
2342                 if (sge_len > 0)
2343                         continue;
2344
2345                 sg_scmd = sg_next(sg_scmd);
2346                 sge_addr = sg_dma_address(sg_scmd);
2347                 sge_len = sg_dma_len(sg_scmd);
2348         }
2349
2350         main_chain_element->Length =
2351                 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2352         return;
2353 }
2354
2355 static bool
2356 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2357         struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2358 {
2359         u32 data_length = 0;
2360         bool build_prp = true;
2361
2362         data_length = scsi_bufflen(scmd);
2363         if (pcie_device &&
2364             (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2365                 build_prp = false;
2366                 return build_prp;
2367         }
2368
2369         /* If the data length is <= 16K and the number of SGEs is <= 2,
2370          * we build an IEEE SGL.
2371          */
2372         if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2373                 build_prp = false;
2374
2375         return build_prp;
2376 }
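/*
 * Hedged example of the heuristic above: a 64 KiB transfer spread over 16
 * SGEs takes the PRP path, while an 8 KiB transfer in a single SGE
 * (<= 16K and <= 2 SGEs) stays on the cheaper IEEE SGL path.
 */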
2377
2378 /**
2379  * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2380  * determine if the driver needs to build a native SGL.  If so, that native
2381  * SGL is built in the special contiguous buffers allocated especially for
2382  * PCIe SGL creation.  If the driver will not build a native SGL, return
2383  * TRUE and a normal IEEE SGL will be built.  Currently this routine
2384  * supports NVMe.
2385  * @ioc: per adapter object
2386  * @mpi_request: mf request pointer
2387  * @smid: system request message index
2388  * @scmd: scsi command
2389  * @pcie_device: points to the PCIe device's info
2390  *
2391  * Return: 0 if native SGL was built, 1 if no SGL was built
2392  */
2393 static int
2394 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2395         Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2396         struct _pcie_device *pcie_device)
2397 {
2398         int sges_left;
2399
2400         /* Get the SG list pointer and info. */
2401         sges_left = scsi_dma_map(scmd);
2402         if (sges_left < 0) {
2403                 sdev_printk(KERN_ERR, scmd->device,
2404                         "scsi_dma_map failed: request for %d bytes!\n",
2405                         scsi_bufflen(scmd));
2406                 return 1;
2407         }
2408
2409         /* Check if we need to build a native SG list. */
2410         if (base_is_prp_possible(ioc, pcie_device,
2411                                 scmd, sges_left) == 0) {
2412                 /* PRP is not possible; fall back to an IEEE SGL. */
2413                 goto out;
2414         }
2415
2416         /*
2417          * Build native NVMe PRP.
2418          */
2419         base_make_prp_nvme(ioc, scmd, mpi_request,
2420                         smid, sges_left);
2421
2422         return 0;
2423 out:
2424         scsi_dma_unmap(scmd);
2425         return 1;
2426 }
2427
2428 /**
2429  * _base_add_sg_single_ieee - add sg element for IEEE format
2430  * @paddr: virtual address for SGE
2431  * @flags: SGE flags
2432  * @chain_offset: number of 128 byte elements from start of segment
2433  * @length: data transfer length
2434  * @dma_addr: Physical address
2435  */
2436 static void
2437 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2438         dma_addr_t dma_addr)
2439 {
2440         Mpi25IeeeSgeChain64_t *sgel = paddr;
2441
2442         sgel->Flags = flags;
2443         sgel->NextChainOffset = chain_offset;
2444         sgel->Length = cpu_to_le32(length);
2445         sgel->Address = cpu_to_le64(dma_addr);
2446 }
2447
2448 /**
2449  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2450  * @ioc: per adapter object
2451  * @paddr: virtual address for SGE
2452  *
2453  * Create a zero length scatter gather entry to ensure the IOC's hardware has
2454  * something to use if the target device goes brain dead and tries
2455  * to send data even when none is asked for.
2456  */
2457 static void
2458 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2459 {
2460         u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2461                 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2462                 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2463
2464         _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2465 }
2466
2467 /**
2468  * _base_build_sg_scmd - main sg creation routine; the pcie_device
2469  *              argument is unused here
2470  * @ioc: per adapter object
2471  * @scmd: scsi command
2472  * @smid: system request message index
2473  * @unused: unused pcie_device pointer
2474  * Context: none.
2475  *
2476  * The main routine that builds scatter gather table from a given
2477  * scsi request sent via the .queuecommand main handler.
2478  *
2479  * Return: 0 success, anything else error
2480  */
2481 static int
2482 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2483         struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2484 {
2485         Mpi2SCSIIORequest_t *mpi_request;
2486         dma_addr_t chain_dma;
2487         struct scatterlist *sg_scmd;
2488         void *sg_local, *chain;
2489         u32 chain_offset;
2490         u32 chain_length;
2491         u32 chain_flags;
2492         int sges_left;
2493         u32 sges_in_segment;
2494         u32 sgl_flags;
2495         u32 sgl_flags_last_element;
2496         u32 sgl_flags_end_buffer;
2497         struct chain_tracker *chain_req;
2498
2499         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2500
2501         /* init scatter gather flags */
2502         sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2503         if (scmd->sc_data_direction == DMA_TO_DEVICE)
2504                 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2505         sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2506             << MPI2_SGE_FLAGS_SHIFT;
2507         sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2508             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2509             << MPI2_SGE_FLAGS_SHIFT;
2510         sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2511
2512         sg_scmd = scsi_sglist(scmd);
2513         sges_left = scsi_dma_map(scmd);
2514         if (sges_left < 0) {
2515                 sdev_printk(KERN_ERR, scmd->device,
2516                  "scsi_dma_map failed: request for %d bytes!\n",
2517                  scsi_bufflen(scmd));
2518                 return -ENOMEM;
2519         }
2520
2521         sg_local = &mpi_request->SGL;
2522         sges_in_segment = ioc->max_sges_in_main_message;
2523         if (sges_left <= sges_in_segment)
2524                 goto fill_in_last_segment;
2525
2526         mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2527             (sges_in_segment * ioc->sge_size))/4;
2528
2529         /* fill in main message segment when there is a chain following */
2530         while (sges_in_segment) {
2531                 if (sges_in_segment == 1)
2532                         ioc->base_add_sg_single(sg_local,
2533                             sgl_flags_last_element | sg_dma_len(sg_scmd),
2534                             sg_dma_address(sg_scmd));
2535                 else
2536                         ioc->base_add_sg_single(sg_local, sgl_flags |
2537                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2538                 sg_scmd = sg_next(sg_scmd);
2539                 sg_local += ioc->sge_size;
2540                 sges_left--;
2541                 sges_in_segment--;
2542         }
2543
2544         /* initializing the chain flags and pointers */
2545         chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2546         chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2547         if (!chain_req)
2548                 return -1;
2549         chain = chain_req->chain_buffer;
2550         chain_dma = chain_req->chain_buffer_dma;
2551         do {
2552                 sges_in_segment = (sges_left <=
2553                     ioc->max_sges_in_chain_message) ? sges_left :
2554                     ioc->max_sges_in_chain_message;
2555                 chain_offset = (sges_left == sges_in_segment) ?
2556                     0 : (sges_in_segment * ioc->sge_size)/4;
2557                 chain_length = sges_in_segment * ioc->sge_size;
2558                 if (chain_offset) {
2559                         chain_offset = chain_offset <<
2560                             MPI2_SGE_CHAIN_OFFSET_SHIFT;
2561                         chain_length += ioc->sge_size;
2562                 }
2563                 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2564                     chain_length, chain_dma);
2565                 sg_local = chain;
2566                 if (!chain_offset)
2567                         goto fill_in_last_segment;
2568
2569                 /* fill in chain segments */
2570                 while (sges_in_segment) {
2571                         if (sges_in_segment == 1)
2572                                 ioc->base_add_sg_single(sg_local,
2573                                     sgl_flags_last_element |
2574                                     sg_dma_len(sg_scmd),
2575                                     sg_dma_address(sg_scmd));
2576                         else
2577                                 ioc->base_add_sg_single(sg_local, sgl_flags |
2578                                     sg_dma_len(sg_scmd),
2579                                     sg_dma_address(sg_scmd));
2580                         sg_scmd = sg_next(sg_scmd);
2581                         sg_local += ioc->sge_size;
2582                         sges_left--;
2583                         sges_in_segment--;
2584                 }
2585
2586                 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2587                 if (!chain_req)
2588                         return -1;
2589                 chain = chain_req->chain_buffer;
2590                 chain_dma = chain_req->chain_buffer_dma;
2591         } while (1);
2592
2593
2594  fill_in_last_segment:
2595
2596         /* fill the last segment */
2597         while (sges_left) {
2598                 if (sges_left == 1)
2599                         ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2600                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2601                 else
2602                         ioc->base_add_sg_single(sg_local, sgl_flags |
2603                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2604                 sg_scmd = sg_next(sg_scmd);
2605                 sg_local += ioc->sge_size;
2606                 sges_left--;
2607         }
2608
2609         return 0;
2610 }
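/*
 * Worked example for the ChainOffset math above, assuming a 12-byte 64-bit
 * MPI2 SGE (ioc->sge_size == 12) and the SGL starting 96 bytes into
 * Mpi2SCSIIORequest_t: with 8 SGEs in the main message,
 * ChainOffset = (96 + 8 * 12) / 4 = 48, expressed in 4-byte words.
 */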
2611
2612 /**
2613  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2614  * @ioc: per adapter object
2615  * @scmd: scsi command
2616  * @smid: system request message index
2617  * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2618  * constructed on need.
2619  * Context: none.
2620  *
2621  * The main routine that builds scatter gather table from a given
2622  * scsi request sent via the .queuecommand main handler.
2623  *
2624  * Return: 0 success, anything else error
2625  */
2626 static int
2627 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2628         struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2629 {
2630         Mpi25SCSIIORequest_t *mpi_request;
2631         dma_addr_t chain_dma;
2632         struct scatterlist *sg_scmd;
2633         void *sg_local, *chain;
2634         u32 chain_offset;
2635         u32 chain_length;
2636         int sges_left;
2637         u32 sges_in_segment;
2638         u8 simple_sgl_flags;
2639         u8 simple_sgl_flags_last;
2640         u8 chain_sgl_flags;
2641         struct chain_tracker *chain_req;
2642
2643         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2644
2645         /* init scatter gather flags */
2646         simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2647             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2648         simple_sgl_flags_last = simple_sgl_flags |
2649             MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2650         chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2651             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2652
2653         /* Check if we need to build a native SG list. */
2654         if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2655                         smid, scmd, pcie_device) == 0)) {
2656                 /* We built a native SG list, just return. */
2657                 return 0;
2658         }
2659
2660         sg_scmd = scsi_sglist(scmd);
2661         sges_left = scsi_dma_map(scmd);
2662         if (sges_left < 0) {
2663                 sdev_printk(KERN_ERR, scmd->device,
2664                         "scsi_dma_map failed: request for %d bytes!\n",
2665                         scsi_bufflen(scmd));
2666                 return -ENOMEM;
2667         }
2668
2669         sg_local = &mpi_request->SGL;
2670         sges_in_segment = (ioc->request_sz -
2671                    offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2672         if (sges_left <= sges_in_segment)
2673                 goto fill_in_last_segment;
2674
2675         mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2676             (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2677
2678         /* fill in main message segment when there is a chain following */
2679         while (sges_in_segment > 1) {
2680                 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2681                     sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2682                 sg_scmd = sg_next(sg_scmd);
2683                 sg_local += ioc->sge_size_ieee;
2684                 sges_left--;
2685                 sges_in_segment--;
2686         }
2687
2688         /* initializing the pointers */
2689         chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2690         if (!chain_req)
2691                 return -1;
2692         chain = chain_req->chain_buffer;
2693         chain_dma = chain_req->chain_buffer_dma;
2694         do {
2695                 sges_in_segment = (sges_left <=
2696                     ioc->max_sges_in_chain_message) ? sges_left :
2697                     ioc->max_sges_in_chain_message;
2698                 chain_offset = (sges_left == sges_in_segment) ?
2699                     0 : sges_in_segment;
2700                 chain_length = sges_in_segment * ioc->sge_size_ieee;
2701                 if (chain_offset)
2702                         chain_length += ioc->sge_size_ieee;
2703                 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2704                     chain_offset, chain_length, chain_dma);
2705
2706                 sg_local = chain;
2707                 if (!chain_offset)
2708                         goto fill_in_last_segment;
2709
2710                 /* fill in chain segments */
2711                 while (sges_in_segment) {
2712                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2713                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2714                         sg_scmd = sg_next(sg_scmd);
2715                         sg_local += ioc->sge_size_ieee;
2716                         sges_left--;
2717                         sges_in_segment--;
2718                 }
2719
2720                 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2721                 if (!chain_req)
2722                         return -1;
2723                 chain = chain_req->chain_buffer;
2724                 chain_dma = chain_req->chain_buffer_dma;
2725         } while (1);
2726
2727
2728  fill_in_last_segment:
2729
2730         /* fill the last segment */
2731         while (sges_left > 0) {
2732                 if (sges_left == 1)
2733                         _base_add_sg_single_ieee(sg_local,
2734                             simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2735                             sg_dma_address(sg_scmd));
2736                 else
2737                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2738                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2739                 sg_scmd = sg_next(sg_scmd);
2740                 sg_local += ioc->sge_size_ieee;
2741                 sges_left--;
2742         }
2743
2744         return 0;
2745 }
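
/*
 * Worked example of the chaining above (all sizes illustrative): with a
 * 128-byte request frame, a 16-byte IEEE SGE and the SGL at offset 64, the
 * main frame holds (128 - 64) / 16 = 4 SGE slots. For 10 scatter entries,
 * 3 simple SGEs fill the main frame, the 4th slot becomes a chain SGE
 * pointing at a buffer from _base_get_chain_buffer_tracker(), and the
 * remaining 7 entries are placed there, the last one flagged END_OF_LIST.
 */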
2746
2747 /**
2748  * _base_build_sg_ieee - build generic sg for IEEE format
2749  * @ioc: per adapter object
2750  * @psge: virtual address for SGE
2751  * @data_out_dma: physical address for WRITES
2752  * @data_out_sz: data xfer size for WRITES
2753  * @data_in_dma: physical address for READS
2754  * @data_in_sz: data xfer size for READS
2755  */
2756 static void
2757 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2758         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2759         size_t data_in_sz)
2760 {
2761         u8 sgl_flags;
2762
2763         if (!data_out_sz && !data_in_sz) {
2764                 _base_build_zero_len_sge_ieee(ioc, psge);
2765                 return;
2766         }
2767
2768         if (data_out_sz && data_in_sz) {
2769                 /* WRITE sgel first */
2770                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2771                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2772                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2773                     data_out_dma);
2774
2775                 /* incr sgel */
2776                 psge += ioc->sge_size_ieee;
2777
2778                 /* READ sgel last */
2779                 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2780                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2781                     data_in_dma);
2782         } else if (data_out_sz) /* WRITE */ {
2783                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2784                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2785                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2786                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2787                     data_out_dma);
2788         } else if (data_in_sz) /* READ */ {
2789                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2790                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2791                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2792                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2793                     data_in_dma);
2794         }
2795 }
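
/*
 * Usage sketch for the helper above (illustrative, not driver code;
 * out_dma/out_sz/in_dma/in_sz stand for already DMA-mapped buffers): a
 * bidirectional request gets a WRITE simple SGE first and a READ simple
 * SGE carrying MPI25_IEEE_SGE_FLAGS_END_OF_LIST second:
 *
 *	_base_build_sg_ieee(ioc, &mpi_request->SGL, out_dma, out_sz,
 *	    in_dma, in_sz);
 */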
2796
2797 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
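
/*
 * Worked example (illustrative): convert_to_kb() turns a page count into
 * kB. With 4 KiB pages, PAGE_SHIFT is 12, the shift is 12 - 10 = 2, and
 * convert_to_kb(262144) == 262144 << 2 == 1048576 kB, i.e. 1 GiB.
 */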
2798
2799 /**
2800  * _base_config_dma_addressing - set dma addressing
2801  * @ioc: per adapter object
2802  * @pdev: PCI device struct
2803  *
2804  * Return: 0 for success, non-zero for failure.
2805  */
2806 static int
2807 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2808 {
2809         struct sysinfo s;
2810         int dma_mask;
2811
2812         if (ioc->is_mcpu_endpoint ||
2813             sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
2814             dma_get_required_mask(&pdev->dev) <= 32)
2815                 dma_mask = 32;
2816         /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2817         else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
2818                 dma_mask = 63;
2819         else
2820                 dma_mask = 64;
2821
2822         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
2823             dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
2824                 return -ENODEV;
2825
2826         if (dma_mask > 32) {
2827                 ioc->base_add_sg_single = &_base_add_sg_single_64;
2828                 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2829         } else {
2830                 ioc->base_add_sg_single = &_base_add_sg_single_32;
2831                 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2832         }
2833
2834         si_meminfo(&s);
2835         ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2836                 dma_mask, convert_to_kb(s.totalram));
2837
2838         return 0;
2839 }
2840
2841 /**
2842  * _base_check_enable_msix - check whether the controller is MSI-X capable.
2843  * @ioc: per adapter object
2844  *
2845  * Check to see if the card is capable of MSI-X, and set the number
2846  * of available msix vectors.
2847  */
2848 static int
2849 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2850 {
2851         int base;
2852         u16 message_control;
2853
2854         /* Check whether this is a SAS2008 B0 controller;
2855          * if so, use IO-APIC instead of MSI-X.
2856          */
2857         if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2858             ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2859                 return -EINVAL;
2860         }
2861
2862         base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2863         if (!base) {
2864                 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2865                 return -EINVAL;
2866         }
2867
2868         /* get msix vector count */
2869         /* NUMA_IO not supported for older controllers */
2870         if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2871             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2872             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2873             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2874             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2875             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2876             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2877                 ioc->msix_vector_count = 1;
2878         else {
2879                 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2880                 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2881         }
2882         dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2883                                   ioc->msix_vector_count));
2884         return 0;
2885 }
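
/*
 * Worked example for the vector-count math above (value illustrative):
 * the Message Control word sits 2 bytes past the MSI-X capability header
 * and encodes "table size - 1" in its low bits, so a message_control of
 * 0x001F yields (0x001F & 0x3FF) + 1 == 32 MSI-X vectors.
 */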
2886
2887 /**
2888  * _base_free_irq - free irq
2889  * @ioc: per adapter object
2890  *
2891  * Free the irq of each reply_queue and delete it from the list.
2892  */
2893 static void
2894 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2895 {
2896         struct adapter_reply_queue *reply_q, *next;
2897
2898         if (list_empty(&ioc->reply_queue_list))
2899                 return;
2900
2901         list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2902                 list_del(&reply_q->list);
2903                 if (ioc->smp_affinity_enable)
2904                         irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2905                             reply_q->msix_index), NULL);
2906                 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2907                          reply_q);
2908                 kfree(reply_q);
2909         }
2910 }
2911
2912 /**
2913  * _base_request_irq - request irq
2914  * @ioc: per adapter object
2915  * @index: msix index into vector table
2916  *
2917  * Allocate a reply_queue, request its irq and insert it into the list.
2918  */
2919 static int
2920 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2921 {
2922         struct pci_dev *pdev = ioc->pdev;
2923         struct adapter_reply_queue *reply_q;
2924         int r;
2925
2926         reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2927         if (!reply_q) {
2928                 ioc_err(ioc, "unable to allocate memory %zu!\n",
2929                         sizeof(struct adapter_reply_queue));
2930                 return -ENOMEM;
2931         }
2932         reply_q->ioc = ioc;
2933         reply_q->msix_index = index;
2934
2935         atomic_set(&reply_q->busy, 0);
2936         if (ioc->msix_enable)
2937                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2938                     ioc->driver_name, ioc->id, index);
2939         else
2940                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2941                     ioc->driver_name, ioc->id);
2942         r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2943                         IRQF_SHARED, reply_q->name, reply_q);
2944         if (r) {
2945                 pr_err("%s: unable to allocate interrupt %d!\n",
2946                        reply_q->name, pci_irq_vector(pdev, index));
2947                 kfree(reply_q);
2948                 return -EBUSY;
2949         }
2950
2951         INIT_LIST_HEAD(&reply_q->list);
2952         list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2953         return 0;
2954 }
2955
2956 /**
2957  * _base_assign_reply_queues - assign an msix index to each cpu
2958  * @ioc: per adapter object
2959  *
2960  * The end user would need to set the affinity via /proc/irq/#/smp_affinity.
2961  *
2962  * It would be nice if we could call irq_set_affinity, however it is not
2963  * an exported symbol.
2964  */
2965 static void
2966 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2967 {
2968         unsigned int cpu, nr_cpus, nr_msix, index = 0;
2969         struct adapter_reply_queue *reply_q;
2970         int local_numa_node;
2971
2972         if (!_base_is_controller_msix_enabled(ioc))
2973                 return;
2974
2975         if (ioc->msix_load_balance)
2976                 return;
2977
2978         memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2979
2980         nr_cpus = num_online_cpus();
2981         nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2982                                                ioc->facts.MaxMSIxVectors);
2983         if (!nr_msix)
2984                 return;
2985
2986         if (ioc->smp_affinity_enable) {
2987
2988                 /*
2989                  * set irq affinity to local numa node for those irqs
2990                  * corresponding to high iops queues.
2991                  */
2992                 if (ioc->high_iops_queues) {
2993                         local_numa_node = dev_to_node(&ioc->pdev->dev);
2994                         for (index = 0; index < ioc->high_iops_queues;
2995                             index++) {
2996                                 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2997                                     index), cpumask_of_node(local_numa_node));
2998                         }
2999                 }
3000
3001                 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3002                         const cpumask_t *mask;
3003
3004                         if (reply_q->msix_index < ioc->high_iops_queues)
3005                                 continue;
3006
3007                         mask = pci_irq_get_affinity(ioc->pdev,
3008                             reply_q->msix_index);
3009                         if (!mask) {
3010                                 ioc_warn(ioc, "no affinity for msi %x\n",
3011                                          reply_q->msix_index);
3012                                 goto fall_back;
3013                         }
3014
3015                         for_each_cpu_and(cpu, mask, cpu_online_mask) {
3016                                 if (cpu >= ioc->cpu_msix_table_sz)
3017                                         break;
3018                                 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3019                         }
3020                 }
3021                 return;
3022         }
3023
3024 fall_back:
3025         cpu = cpumask_first(cpu_online_mask);
3026         nr_msix -= ioc->high_iops_queues;
3027         index = 0;
3028
3029         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3030                 unsigned int i, group = nr_cpus / nr_msix;
3031
3032                 if (reply_q->msix_index < ioc->high_iops_queues)
3033                         continue;
3034
3035                 if (cpu >= nr_cpus)
3036                         break;
3037
3038                 if (index < nr_cpus % nr_msix)
3039                         group++;
3040
3041                 for (i = 0 ; i < group ; i++) {
3042                         ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3043                         cpu = cpumask_next(cpu, cpu_online_mask);
3044                 }
3045                 index++;
3046         }
3047 }
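
/*
 * Worked example of the fallback grouping above (numbers illustrative):
 * with nr_cpus = 16 online CPUs and nr_msix = 6 remaining reply queues,
 * group = 16 / 6 = 2 and 16 % 6 = 4, so the first 4 reply queues each
 * serve 3 CPUs and the last 2 serve 2 CPUs, covering all 16 CPUs.
 */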
3048
3049 /**
3050  * _base_check_and_enable_high_iops_queues - enable high iops mode
3051  * @ioc: per adapter object
3052  * @hba_msix_vector_count: msix vectors supported by HBA
3053  *
3054  * Enable high iops queues only if
3055  *  - the HBA is a SEA/AERO controller and
3056  *  - the number of MSI-X vectors supported by the HBA is 128 and
3057  *  - the total CPU count in the system is >= 16 and
3058  *  - the driver is loaded with the default max_msix_vectors module parameter and
3059  *  - the system is booted in non kdump mode.
3060  *
3061  * Return: nothing.
3062  */
3063 static void
3064 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3065                 int hba_msix_vector_count)
3066 {
3067         u16 lnksta, speed;
3068
3069         if (perf_mode == MPT_PERF_MODE_IOPS ||
3070             perf_mode == MPT_PERF_MODE_LATENCY) {
3071                 ioc->high_iops_queues = 0;
3072                 return;
3073         }
3074
3075         if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3076
3077                 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3078                 speed = lnksta & PCI_EXP_LNKSTA_CLS;
3079
3080                 if (speed < 0x4) {
3081                         ioc->high_iops_queues = 0;
3082                         return;
3083                 }
3084         }
3085
3086         if (!reset_devices && ioc->is_aero_ioc &&
3087             hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3088             num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3089             max_msix_vectors == -1)
3090                 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3091         else
3092                 ioc->high_iops_queues = 0;
3093 }
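
/*
 * Note on the link-speed check above: PCI_EXP_LNKSTA_CLS holds the
 * current link speed as an encoded value (1 = 2.5 GT/s, 2 = 5 GT/s,
 * 3 = 8 GT/s, 4 = 16 GT/s), so "speed < 0x4" rejects anything slower
 * than a PCIe Gen4 link in the default performance mode.
 */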
3094
3095 /**
3096  * _base_disable_msix - disables msix
3097  * @ioc: per adapter object
3098  *
3099  */
3100 static void
3101 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3102 {
3103         if (!ioc->msix_enable)
3104                 return;
3105         pci_free_irq_vectors(ioc->pdev);
3106         ioc->msix_enable = 0;
3107 }
3108
3109 /**
3110  * _base_alloc_irq_vectors - allocate msix vectors
3111  * @ioc: per adapter object
3112  *
3113  */
3114 static int
3115 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3116 {
3117         int i, irq_flags = PCI_IRQ_MSIX;
3118         struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3119         struct irq_affinity *descp = &desc;
3120
3121         if (ioc->smp_affinity_enable)
3122                 irq_flags |= PCI_IRQ_AFFINITY;
3123         else
3124                 descp = NULL;
3125
3126         ioc_info(ioc, "high_iops_queues: %d, reply_queue_count: %d\n",
3127             ioc->high_iops_queues, ioc->reply_queue_count);
3128
3129         i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3130             ioc->high_iops_queues,
3131             ioc->reply_queue_count, irq_flags, descp);
3132
3133         return i;
3134 }
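
/*
 * Sketch of the pre_vectors semantics used above (numbers illustrative):
 * with desc.pre_vectors = 8 and 24 vectors requested,
 * pci_alloc_irq_vectors_affinity() leaves vectors 0-7 (the high iops
 * queues) out of the automatic affinity spread and distributes vectors
 * 8-23 across the online CPUs.
 */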
3135
3136 /**
3137  * _base_enable_msix - enable msix; fall back to io_apic on failure
3138  * @ioc: per adapter object
3139  *
3140  */
3141 static int
3142 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3143 {
3144         int r;
3145         int i, local_max_msix_vectors;
3146         u8 try_msix = 0;
3147
3148         ioc->msix_load_balance = false;
3149
3150         if (msix_disable == -1 || msix_disable == 0)
3151                 try_msix = 1;
3152
3153         if (!try_msix)
3154                 goto try_ioapic;
3155
3156         if (_base_check_enable_msix(ioc) != 0)
3157                 goto try_ioapic;
3158
3159         ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3160         pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3161                 ioc->cpu_count, max_msix_vectors);
3162         if (ioc->is_aero_ioc)
3163                 _base_check_and_enable_high_iops_queues(ioc,
3164                         ioc->msix_vector_count);
3165         ioc->reply_queue_count =
3166                 min_t(int, ioc->cpu_count + ioc->high_iops_queues,
3167                 ioc->msix_vector_count);
3168
3169         if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3170                 local_max_msix_vectors = (reset_devices) ? 1 : 8;
3171         else
3172                 local_max_msix_vectors = max_msix_vectors;
3173
3174         if (local_max_msix_vectors > 0)
3175                 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3176                         ioc->reply_queue_count);
3177         else if (local_max_msix_vectors == 0)
3178                 goto try_ioapic;
3179
3180         /*
3181          * Enable msix_load_balance only if combined reply queue mode is
3182          * disabled on SAS3 & above generation HBA devices.
3183          */
3184         if (!ioc->combined_reply_queue &&
3185             ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3186                 ioc_info(ioc,
3187                     "combined ReplyQueue is off, Enabling msix load balance\n");
3188                 ioc->msix_load_balance = true;
3189         }
3190
3191         /*
3192          * smp affinity setting is not needed when msix load balance
3193          * is enabled.
3194          */
3195         if (ioc->msix_load_balance)
3196                 ioc->smp_affinity_enable = 0;
3197
3198         r = _base_alloc_irq_vectors(ioc);
3199         if (r < 0) {
3200                 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3201                 goto try_ioapic;
3202         }
3203
3204         ioc->msix_enable = 1;
3205         ioc->reply_queue_count = r;
3206         for (i = 0; i < ioc->reply_queue_count; i++) {
3207                 r = _base_request_irq(ioc, i);
3208                 if (r) {
3209                         _base_free_irq(ioc);
3210                         _base_disable_msix(ioc);
3211                         goto try_ioapic;
3212                 }
3213         }
3214
3215         ioc_info(ioc, "High IOPs queues : %s\n",
3216                         ioc->high_iops_queues ? "enabled" : "disabled");
3217
3218         return 0;
3219
3220 /* fall back to io_apic interrupt routing */
3221  try_ioapic:
3222         ioc->high_iops_queues = 0;
3223         ioc_info(ioc, "High IOPs queues : disabled\n");
3224         ioc->reply_queue_count = 1;
3225         r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3226         if (r < 0) {
3227                 dfailprintk(ioc,
3228                             ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3229                                      r));
3230         } else
3231                 r = _base_request_irq(ioc, 0);
3232
3233         return r;
3234 }
3235
3236 /**
3237  * mpt3sas_base_unmap_resources - free controller resources
3238  * @ioc: per adapter object
3239  */
3240 static void
3241 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3242 {
3243         struct pci_dev *pdev = ioc->pdev;
3244
3245         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3246
3247         _base_free_irq(ioc);
3248         _base_disable_msix(ioc);
3249
3250         kfree(ioc->replyPostRegisterIndex);
3251         ioc->replyPostRegisterIndex = NULL;
3252
3253
3254         if (ioc->chip_phys) {
3255                 iounmap(ioc->chip);
3256                 ioc->chip_phys = 0;
3257         }
3258
3259         if (pci_is_enabled(pdev)) {
3260                 pci_release_selected_regions(ioc->pdev, ioc->bars);
3261                 pci_disable_pcie_error_reporting(pdev);
3262                 pci_disable_device(pdev);
3263         }
3264 }
3265
3266 static int
3267 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3268
3269 /**
3270  * _base_check_for_fault_and_issue_reset - check if the IOC is in fault state
3271  *     and, if so, issue a diag reset.
3272  * @ioc: per adapter object
3273  *
3274  * Return: 0 for success, non-zero for failure.
3275  */
3276 static int
3277 _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3278 {
3279         u32 ioc_state;
3280         int rc = -EFAULT;
3281
3282         dinitprintk(ioc, pr_info("%s\n", __func__));
3283         if (ioc->pci_error_recovery)
3284                 return 0;
3285         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3286         dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3287
3288         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3289                 mpt3sas_print_fault_code(ioc, ioc_state &
3290                     MPI2_DOORBELL_DATA_MASK);
3291                 rc = _base_diag_reset(ioc);
3292         } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3293             MPI2_IOC_STATE_COREDUMP) {
3294                 mpt3sas_print_coredump_info(ioc, ioc_state &
3295                      MPI2_DOORBELL_DATA_MASK);
3296                 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3297                 rc = _base_diag_reset(ioc);
3298         }
3299
3300         return rc;
3301 }
3302
3303 /**
3304  * mpt3sas_base_map_resources - map in controller resources (io/irq/memmap)
3305  * @ioc: per adapter object
3306  *
3307  * Return: 0 for success, non-zero for failure.
3308  */
3309 int
3310 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3311 {
3312         struct pci_dev *pdev = ioc->pdev;
3313         u32 memap_sz;
3314         u32 pio_sz;
3315         int i, r = 0, rc;
3316         u64 pio_chip = 0;
3317         phys_addr_t chip_phys = 0;
3318         struct adapter_reply_queue *reply_q;
3319
3320         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3321
3322         ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3323         if (pci_enable_device_mem(pdev)) {
3324                 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3325                 ioc->bars = 0;
3326                 return -ENODEV;
3327         }
3328
3329
3330         if (pci_request_selected_regions(pdev, ioc->bars,
3331             ioc->driver_name)) {
3332                 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3333                 ioc->bars = 0;
3334                 r = -ENODEV;
3335                 goto out_fail;
3336         }
3337
3338 /* AER (Advanced Error Reporting) hooks */
3339         pci_enable_pcie_error_reporting(pdev);
3340
3341         pci_set_master(pdev);
3342
3343
3344         if (_base_config_dma_addressing(ioc, pdev) != 0) {
3345                 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3346                 r = -ENODEV;
3347                 goto out_fail;
3348         }
3349
3350         for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3351              (!memap_sz || !pio_sz); i++) {
3352                 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3353                         if (pio_sz)
3354                                 continue;
3355                         pio_chip = (u64)pci_resource_start(pdev, i);
3356                         pio_sz = pci_resource_len(pdev, i);
3357                 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3358                         if (memap_sz)
3359                                 continue;
3360                         ioc->chip_phys = pci_resource_start(pdev, i);
3361                         chip_phys = ioc->chip_phys;
3362                         memap_sz = pci_resource_len(pdev, i);
3363                         ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3364                 }
3365         }
3366
3367         if (ioc->chip == NULL) {
3368                 ioc_err(ioc,
3369                     "unable to map adapter memory or resource not found!\n");
3370                 r = -EINVAL;
3371                 goto out_fail;
3372         }
3373
3374         _base_mask_interrupts(ioc);
3375
3376         r = _base_get_ioc_facts(ioc);
3377         if (r) {
3378                 rc = _base_check_for_fault_and_issue_reset(ioc);
3379                 if (rc || (_base_get_ioc_facts(ioc)))
3380                         goto out_fail;
3381         }
3382
3383         if (!ioc->rdpq_array_enable_assigned) {
3384                 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3385                 ioc->rdpq_array_enable_assigned = 1;
3386         }
3387
3388         r = _base_enable_msix(ioc);
3389         if (r)
3390                 goto out_fail;
3391
3392         if (!ioc->is_driver_loading)
3393                 _base_init_irqpolls(ioc);
3394         /* Use the Combined reply queue feature only for SAS3 C0 & higher
3395          * revision HBAs and also only when reply queue count is greater than 8
3396          */
3397         if (ioc->combined_reply_queue) {
3398                 /* Determine the Supplemental Reply Post Host Index Registers
3399                  * Address. The Supplemental Reply Post Host Index Registers
3400                  * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
3401                  * each subsequent register is offset by
3402                  * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the previous one.
3403                  */
3404                 ioc->replyPostRegisterIndex = kcalloc(
3405                      ioc->combined_reply_index_count,
3406                      sizeof(resource_size_t *), GFP_KERNEL);
3407                 if (!ioc->replyPostRegisterIndex) {
3408                         ioc_err(ioc,
3409                             "allocation for replyPostRegisterIndex failed!\n");
3410                         r = -ENOMEM;
3411                         goto out_fail;
3412                 }
3413
3414                 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3415                         ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3416                              ((u8 __force *)&ioc->chip->Doorbell +
3417                              MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3418                              (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3419                 }
3420         }
3421
3422         if (ioc->is_warpdrive) {
3423                 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3424                     &ioc->chip->ReplyPostHostIndex;
3425
3426                 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3427                         ioc->reply_post_host_index[i] =
3428                         (resource_size_t __iomem *)
3429                         ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3430                         * 4)));
3431         }
3432
3433         list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3434                 pr_info("%s: %s enabled: IRQ %d\n",
3435                         reply_q->name,
3436                         ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3437                         pci_irq_vector(ioc->pdev, reply_q->msix_index));
3438
3439         ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3440                  &chip_phys, ioc->chip, memap_sz);
3441         ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3442                  (unsigned long long)pio_chip, pio_sz);
3443
3444         /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3445         pci_save_state(pdev);
3446         return 0;
3447
3448  out_fail:
3449         mpt3sas_base_unmap_resources(ioc);
3450         return r;
3451 }
3452
3453 /**
3454  * mpt3sas_base_get_msg_frame - obtain request mf pointer
3455  * @ioc: per adapter object
3456  * @smid: system request message index (smid zero is invalid)
3457  *
3458  * Return: virt pointer to message frame.
3459  */
3460 void *
3461 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3462 {
3463         return (void *)(ioc->request + (smid * ioc->request_sz));
3464 }
3465
3466 /**
3467  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3468  * @ioc: per adapter object
3469  * @smid: system request message index
3470  *
3471  * Return: virt pointer to sense buffer.
3472  */
3473 void *
3474 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3475 {
3476         return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3477 }
3478
3479 /**
3480  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3481  * @ioc: per adapter object
3482  * @smid: system request message index
3483  *
3484  * Return: phys pointer to the low 32bit address of the sense buffer.
3485  */
3486 __le32
3487 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3488 {
3489         return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3490             SCSI_SENSE_BUFFERSIZE));
3491 }
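
/*
 * Worked example of the smid-based addressing above (illustrative):
 * smid 0 is invalid, so smid 1 maps to the start of the sense pool.
 * With SCSI_SENSE_BUFFERSIZE == 96, smid 3 resolves to
 * ioc->sense + 2 * 96 and the DMA address ioc->sense_dma + 192.
 */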
3492
3493 /**
3494  * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3495  * @ioc: per adapter object
3496  * @smid: system request message index
3497  *
3498  * Return: virt pointer to a PCIe SGL.
3499  */
3500 void *
3501 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3502 {
3503         return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3504 }
3505
3506 /**
3507  * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3508  * @ioc: per adapter object
3509  * @smid: system request message index
3510  *
3511  * Return: phys pointer to the address of the PCIe buffer.
3512  */
3513 dma_addr_t
3514 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3515 {
3516         return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3517 }
3518
3519 /**
3520  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3521  * @ioc: per adapter object
3522  * @phys_addr: lower 32 bit physical addr of the reply
3523  *
3524  * Converts 32bit lower physical addr into a virt address.
3525  */
3526 void *
3527 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3528 {
3529         if (!phys_addr)
3530                 return NULL;
3531         return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3532 }
3533
3534 /**
3535  * _base_get_msix_index - get the msix index
3536  * @ioc: per adapter object
3537  * @scmd: scsi_cmnd object
3538  *
3539  * Return: msix index of the general reply queue,
3540  * i.e. the reply queue on which the IO request's reply
3541  * should be posted by the HBA firmware.
3542  */
3543 static inline u8
3544 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3545         struct scsi_cmnd *scmd)
3546 {
3547         /* Enables reply_queue load balancing */
3548         if (ioc->msix_load_balance)
3549                 return ioc->reply_queue_count ?
3550                     base_mod64(atomic64_add_return(1,
3551                     &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3552
3553         return ioc->cpu_msix_table[raw_smp_processor_id()];
3554 }
3555
3556 /**
3557  * _base_sdev_nr_inflight_request - get the number of inflight requests
3558  *                                  of a request queue.
3559  * @q: request_queue object
3560  *
3561  * Return: number of inflight requests of a request queue.
3562  */
3563 inline unsigned long
3564 _base_sdev_nr_inflight_request(struct request_queue *q)
3565 {
3566         struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
3567
3568         return atomic_read(&hctx->nr_active);
3569 }
3570
3571
3572 /**
3573  * _base_get_high_iops_msix_index - get the msix index of
3574  *                              high iops queues
3575  * @ioc: per adapter object
3576  * @scmd: scsi_cmnd object
3577  *
3578  * Return: msix index of the high iops reply queue,
3579  * i.e. the high iops reply queue on which the IO request's
3580  * reply should be posted by the HBA firmware.
3581  */
3582 static inline u8
3583 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3584         struct scsi_cmnd *scmd)
3585 {
3586         /*
3587          * Round robin the IO interrupts among the high iops
3588          * reply queues in batches of 16 when the number of outstanding
3589          * IOs on the target device is >= 8.
3590          */
3591         if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
3592             MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3593                 return base_mod64((
3594                     atomic64_add_return(1, &ioc->high_iops_outstanding) /
3595                     MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3596                     MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3597
3598         return _base_get_msix_index(ioc, scmd);
3599 }
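
/*
 * Worked example of the batching above (queue count illustrative,
 * assuming MPT3SAS_HIGH_IOPS_BATCH_COUNT == 16 and 8 high iops reply
 * queues): each run of 16 consecutive high_iops_outstanding values
 * selects the same queue, so the selection cycles through queues
 * 0, 1, ... 7 and wraps back to 0, batching interrupts per queue.
 */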
3600
3601 /**
3602  * mpt3sas_base_get_smid - obtain a free smid from internal queue
3603  * @ioc: per adapter object
3604  * @cb_idx: callback index
3605  *
3606  * Return: smid (zero is invalid)
3607  */
3608 u16
3609 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3610 {
3611         unsigned long flags;
3612         struct request_tracker *request;
3613         u16 smid;
3614
3615         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3616         if (list_empty(&ioc->internal_free_list)) {
3617                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3618                 ioc_err(ioc, "%s: smid not available\n", __func__);
3619                 return 0;
3620         }
3621
3622         request = list_entry(ioc->internal_free_list.next,
3623             struct request_tracker, tracker_list);
3624         request->cb_idx = cb_idx;
3625         smid = request->smid;
3626         list_del(&request->tracker_list);
3627         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3628         return smid;
3629 }
3630
3631 /**
3632  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3633  * @ioc: per adapter object
3634  * @cb_idx: callback index
3635  * @scmd: pointer to scsi command object
3636  *
3637  * Return: smid (zero is invalid)
3638  */
3639 u16
3640 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3641         struct scsi_cmnd *scmd)
3642 {
3643         struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3644         unsigned int tag = scmd->request->tag;
3645         u16 smid;
3646
3647         smid = tag + 1;
3648         request->cb_idx = cb_idx;
3649         request->smid = smid;
3650         request->scmd = scmd;
3651         INIT_LIST_HEAD(&request->chain_list);
3652         return smid;
3653 }
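
/*
 * Note on the mapping above (illustrative): smid 0 is reserved as
 * invalid, so the block layer tag is shifted by one; a command with
 * scmd->request->tag == 0 is tracked as smid 1, keeping smids unique
 * per outstanding SCSI IO.
 */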
3654
3655 /**
3656  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3657  * @ioc: per adapter object
3658  * @cb_idx: callback index
3659  *
3660  * Return: smid (zero is invalid)
3661  */
3662 u16
3663 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3664 {
3665         unsigned long flags;
3666         struct request_tracker *request;
3667         u16 smid;
3668
3669         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3670         if (list_empty(&ioc->hpr_free_list)) {
3671                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3672                 return 0;
3673         }
3674
3675         request = list_entry(ioc->hpr_free_list.next,
3676             struct request_tracker, tracker_list);
3677         request->cb_idx = cb_idx;
3678         smid = request->smid;
3679         list_del(&request->tracker_list);
3680         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3681         return smid;
3682 }
3683
3684 static void
3685 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3686 {
3687         /*
3688          * See _wait_for_commands_to_complete() call with regards to this code.
3689          */
3690         if (ioc->shost_recovery && ioc->pending_io_count) {
3691                 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3692                 if (ioc->pending_io_count == 0)
3693                         wake_up(&ioc->reset_wq);
3694         }
3695 }
3696
3697 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3698                            struct scsiio_tracker *st)
3699 {
3700         if (WARN_ON(st->smid == 0))
3701                 return;
3702         st->cb_idx = 0xFF;
3703         st->direct_io = 0;
3704         st->scmd = NULL;
3705         atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3706         st->smid = 0;
3707 }
3708
3709 /**
3710  * mpt3sas_base_free_smid - put smid back on free_list
3711  * @ioc: per adapter object
3712  * @smid: system request message index
3713  */
3714 void
3715 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3716 {
3717         unsigned long flags;
3718         int i;
3719
3720         if (smid < ioc->hi_priority_smid) {
3721                 struct scsiio_tracker *st;
3722                 void *request;
3723
3724                 st = _get_st_from_smid(ioc, smid);
3725                 if (!st) {
3726                         _base_recovery_check(ioc);
3727                         return;
3728                 }
3729
3730                 /* Clear MPI request frame */
3731                 request = mpt3sas_base_get_msg_frame(ioc, smid);
3732                 memset(request, 0, ioc->request_sz);
3733
3734                 mpt3sas_base_clear_st(ioc, st);
3735                 _base_recovery_check(ioc);
3736                 return;
3737         }
3738
3739         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3740         if (smid < ioc->internal_smid) {
3741                 /* hi-priority */
3742                 i = smid - ioc->hi_priority_smid;
3743                 ioc->hpr_lookup[i].cb_idx = 0xFF;
3744                 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3745         } else if (smid <= ioc->hba_queue_depth) {
3746                 /* internal queue */
3747                 i = smid - ioc->internal_smid;
3748                 ioc->internal_lookup[i].cb_idx = 0xFF;
3749                 list_add(&ioc->internal_lookup[i].tracker_list,
3750                     &ioc->internal_free_list);
3751         }
3752         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3753 }
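
/*
 * Sketch of the smid address space implied by the checks above:
 * smids [1, hi_priority_smid) are SCSI IO trackers resolved via
 * _get_st_from_smid(), [hi_priority_smid, internal_smid) are
 * hi-priority trackers, and [internal_smid, hba_queue_depth] are
 * internal trackers returned to internal_free_list.
 */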
3754
3755 /**
3756  * _base_mpi_ep_writeq - 32 bit write to MMIO
3757  * @b: data payload
3758  * @addr: address in MMIO space
3759  * @writeq_lock: spin lock
3760  *
3761  * This is special handling for the MPI EP to take care of 32 bit
3762  * environments where it is not guaranteed that the entire word
3763  * is sent in one transfer.
3764  */
3765 static inline void
3766 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3767                                         spinlock_t *writeq_lock)
3768 {
3769         unsigned long flags;
3770
3771         spin_lock_irqsave(writeq_lock, flags);
3772         __raw_writel((u32)(b), addr);
3773         __raw_writel((u32)(b >> 32), (addr + 4));
3774         spin_unlock_irqrestore(writeq_lock, flags);
3775 }
3776
3777 /**
3778  * _base_writeq - 64 bit write to MMIO
3779  * @b: data payload
3780  * @addr: address in MMIO space
3781  * @writeq_lock: spin lock
3782  *
3783  * Glue for writing an atomic 64 bit word to MMIO. This special handling takes
3784  * care of 32 bit environments where it is not guaranteed that the entire
3785  * word is sent in one transfer.
3786  */
3787 #if defined(writeq) && defined(CONFIG_64BIT)
3788 static inline void
3789 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3790 {
3791         wmb();
3792         __raw_writeq(b, addr);
3793         barrier();
3794 }
3795 #else
3796 static inline void
3797 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3798 {
3799         _base_mpi_ep_writeq(b, addr, writeq_lock);
3800 }
3801 #endif
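
/*
 * Illustrative note on the fallback above: without a native writeq(),
 * posting a 64 bit request descriptor takes two 32 bit MMIO writes, so
 * the spinlock in _base_mpi_ep_writeq() prevents another CPU from
 * interleaving its own low/high halves between ours, e.g.:
 *
 *	spin_lock_irqsave(writeq_lock, flags);
 *	__raw_writel(lower_32_bits(b), addr);
 *	__raw_writel(upper_32_bits(b), addr + 4);
 *	spin_unlock_irqrestore(writeq_lock, flags);
 */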
3802
3803 /**
3804  * _base_set_and_get_msix_index - get the msix index and assign to msix_io
3805  *                                variable of scsi tracker
3806  * @ioc: per adapter object
3807  * @smid: system request message index
3808  *
3809  * returns msix index.
3810  */
3811 static u8
3812 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3813 {
3814         struct scsiio_tracker *st = NULL;
3815
3816         if (smid < ioc->hi_priority_smid)
3817                 st = _get_st_from_smid(ioc, smid);
3818
3819         if (st == NULL)
3820                 return  _base_get_msix_index(ioc, NULL);
3821
3822         st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3823         return st->msix_io;
3824 }
3825
3826 /**
3827  * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3828  * @ioc: per adapter object
3829  * @smid: system request message index
3830  * @handle: device handle
3831  */
3832 static void
3833 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
3834         u16 smid, u16 handle)
3835 {
3836         Mpi2RequestDescriptorUnion_t descriptor;
3837         u64 *request = (u64 *)&descriptor;
3838         void *mpi_req_iomem;
3839         __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3840
3841         _clone_sg_entries(ioc, (void *) mfp, smid);
3842         mpi_req_iomem = (void __force *)ioc->chip +
3843                         MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3844         _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3845                                         ioc->request_sz);
3846         descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3847         descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3848         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3849         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3850         descriptor.SCSIIO.LMID = 0;
3851         _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3852             &ioc->scsi_lookup_lock);
3853 }
3854
3855 /**
3856  * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3857  * @ioc: per adapter object
3858  * @smid: system request message index
3859  * @handle: device handle
3860  */
3861 static void
3862 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3863 {
3864         Mpi2RequestDescriptorUnion_t descriptor;
3865         u64 *request = (u64 *)&descriptor;
3866
3867
3868         descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3869         descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3870         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3871         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3872         descriptor.SCSIIO.LMID = 0;
3873         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3874             &ioc->scsi_lookup_lock);
3875 }
3876
3877 /**
3878  * _base_put_smid_fast_path - send fast path request to firmware
3879  * @ioc: per adapter object
3880  * @smid: system request message index
3881  * @handle: device handle
3882  */
3883 static void
3884 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3885         u16 handle)
3886 {
3887         Mpi2RequestDescriptorUnion_t descriptor;
3888         u64 *request = (u64 *)&descriptor;
3889
3890         descriptor.SCSIIO.RequestFlags =
3891             MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3892         descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3893         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3894         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3895         descriptor.SCSIIO.LMID = 0;
3896         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3897             &ioc->scsi_lookup_lock);
3898 }
3899
3900 /**
3901  * _base_put_smid_hi_priority - send Task Management request to firmware
3902  * @ioc: per adapter object
3903  * @smid: system request message index
3904  * @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0.
3905  */
3906 static void
3907 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3908         u16 msix_task)
3909 {
3910         Mpi2RequestDescriptorUnion_t descriptor;
3911         void *mpi_req_iomem;
3912         u64 *request;
3913
3914         if (ioc->is_mcpu_endpoint) {
3915                 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3916
3917                 /* TBD 256 is offset within sys register. */
3918                 mpi_req_iomem = (void __force *)ioc->chip
3919                                         + MPI_FRAME_START_OFFSET
3920                                         + (smid * ioc->request_sz);
3921                 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3922                                                         ioc->request_sz);
3923         }
3924
3925         request = (u64 *)&descriptor;
3926
3927         descriptor.HighPriority.RequestFlags =
3928             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3929         descriptor.HighPriority.MSIxIndex =  msix_task;
3930         descriptor.HighPriority.SMID = cpu_to_le16(smid);
3931         descriptor.HighPriority.LMID = 0;
3932         descriptor.HighPriority.Reserved1 = 0;
3933         if (ioc->is_mcpu_endpoint)
3934                 _base_mpi_ep_writeq(*request,
3935                                 &ioc->chip->RequestDescriptorPostLow,
3936                                 &ioc->scsi_lookup_lock);
3937         else
3938                 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3939                     &ioc->scsi_lookup_lock);
3940 }
3941
3942 /**
3943  * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
3944  *  firmware
3945  * @ioc: per adapter object
3946  * @smid: system request message index
3947  */
3948 void
3949 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3950 {
3951         Mpi2RequestDescriptorUnion_t descriptor;
3952         u64 *request = (u64 *)&descriptor;
3953
3954         descriptor.Default.RequestFlags =
3955                 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3956         descriptor.Default.MSIxIndex =  _base_set_and_get_msix_index(ioc, smid);
3957         descriptor.Default.SMID = cpu_to_le16(smid);
3958         descriptor.Default.LMID = 0;
3959         descriptor.Default.DescriptorTypeDependent = 0;
3960         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3961             &ioc->scsi_lookup_lock);
3962 }
3963
3964 /**
3965  * _base_put_smid_default - Default, primarily used for config pages
3966  * @ioc: per adapter object
3967  * @smid: system request message index
3968  */
3969 static void
3970 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3971 {
3972         Mpi2RequestDescriptorUnion_t descriptor;
3973         void *mpi_req_iomem;
3974         u64 *request;
3975
3976         if (ioc->is_mcpu_endpoint) {
3977                 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3978
3979                 _clone_sg_entries(ioc, (void *) mfp, smid);
3980                 /* TBD 256 is offset within sys register */
3981                 mpi_req_iomem = (void __force *)ioc->chip +
3982                         MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3983                 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3984                                                         ioc->request_sz);
3985         }
3986         request = (u64 *)&descriptor;
3987         descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3988         descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3989         descriptor.Default.SMID = cpu_to_le16(smid);
3990         descriptor.Default.LMID = 0;
3991         descriptor.Default.DescriptorTypeDependent = 0;
3992         if (ioc->is_mcpu_endpoint)
3993                 _base_mpi_ep_writeq(*request,
3994                                 &ioc->chip->RequestDescriptorPostLow,
3995                                 &ioc->scsi_lookup_lock);
3996         else
3997                 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3998                                 &ioc->scsi_lookup_lock);
3999 }
4000
4001 /**
4002  * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4003  *   Atomic Request Descriptor
4004  * @ioc: per adapter object
4005  * @smid: system request message index
4006  * @handle: device handle, unused in this function, for function type match
4007  *
4008  * Return nothing.
4009  */
4010 static void
4011 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4012         u16 handle)
4013 {
4014         Mpi26AtomicRequestDescriptor_t descriptor;
4015         u32 *request = (u32 *)&descriptor;
4016
4017         descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4018         descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4019         descriptor.SMID = cpu_to_le16(smid);
4020
4021         writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4022 }
4023
4024 /**
4025  * _base_put_smid_fast_path_atomic - send fast path request to firmware
4026  * using Atomic Request Descriptor
4027  * @ioc: per adapter object
4028  * @smid: system request message index
4029  * @handle: device handle, unused in this function, for function type match
4030  * Return nothing
4031  */
4032 static void
4033 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4034         u16 handle)
4035 {
4036         Mpi26AtomicRequestDescriptor_t descriptor;
4037         u32 *request = (u32 *)&descriptor;
4038
4039         descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4040         descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4041         descriptor.SMID = cpu_to_le16(smid);
4042
4043         writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4044 }
4045
4046 /**
4047  * _base_put_smid_hi_priority_atomic - send Task Management request to
4048  * firmware using Atomic Request Descriptor
4049  * @ioc: per adapter object
4050  * @smid: system request message index
4051  * @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0
4052  *
4053  * Return nothing.
4054  */
4055 static void
4056 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4057         u16 msix_task)
4058 {
4059         Mpi26AtomicRequestDescriptor_t descriptor;
4060         u32 *request = (u32 *)&descriptor;
4061
4062         descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4063         descriptor.MSIxIndex = msix_task;
4064         descriptor.SMID = cpu_to_le16(smid);
4065
4066         writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4067 }
4068
4069 /**
4070  * _base_put_smid_default_atomic - Default, primarily used for config pages,
4071  * using the Atomic Request Descriptor
4072  * @ioc: per adapter object
4073  * @smid: system request message index
4074  *
4075  * Return nothing.
4076  */
4077 static void
4078 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4079 {
4080         Mpi26AtomicRequestDescriptor_t descriptor;
4081         u32 *request = (u32 *)&descriptor;
4082
4083         descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4084         descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4085         descriptor.SMID = cpu_to_le16(smid);
4086
4087         writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4088 }
4089
4090 /**
4091  * _base_display_OEMs_branding - Display branding string
4092  * @ioc: per adapter object
4093  */
4094 static void
4095 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4096 {
4099
4100         switch (ioc->pdev->subsystem_vendor) {
4101         case PCI_VENDOR_ID_INTEL:
4102                 switch (ioc->pdev->device) {
4103                 case MPI2_MFGPAGE_DEVID_SAS2008:
4104                         switch (ioc->pdev->subsystem_device) {
4105                         case MPT2SAS_INTEL_RMS2LL080_SSDID:
4106                                 ioc_info(ioc, "%s\n",
4107                                          MPT2SAS_INTEL_RMS2LL080_BRANDING);
4108                                 break;
4109                         case MPT2SAS_INTEL_RMS2LL040_SSDID:
4110                                 ioc_info(ioc, "%s\n",
4111                                          MPT2SAS_INTEL_RMS2LL040_BRANDING);
4112                                 break;
4113                         case MPT2SAS_INTEL_SSD910_SSDID:
4114                                 ioc_info(ioc, "%s\n",
4115                                          MPT2SAS_INTEL_SSD910_BRANDING);
4116                                 break;
4117                         default:
4118                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4119                                          ioc->pdev->subsystem_device);
4120                                 break;
4121                         }
4122                         break;
4123                 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4124                         switch (ioc->pdev->subsystem_device) {
4125                         case MPT2SAS_INTEL_RS25GB008_SSDID:
4126                                 ioc_info(ioc, "%s\n",
4127                                          MPT2SAS_INTEL_RS25GB008_BRANDING);
4128                                 break;
4129                         case MPT2SAS_INTEL_RMS25JB080_SSDID:
4130                                 ioc_info(ioc, "%s\n",
4131                                          MPT2SAS_INTEL_RMS25JB080_BRANDING);
4132                                 break;
4133                         case MPT2SAS_INTEL_RMS25JB040_SSDID:
4134                                 ioc_info(ioc, "%s\n",
4135                                          MPT2SAS_INTEL_RMS25JB040_BRANDING);
4136                                 break;
4137                         case MPT2SAS_INTEL_RMS25KB080_SSDID:
4138                                 ioc_info(ioc, "%s\n",
4139                                          MPT2SAS_INTEL_RMS25KB080_BRANDING);
4140                                 break;
4141                         case MPT2SAS_INTEL_RMS25KB040_SSDID:
4142                                 ioc_info(ioc, "%s\n",
4143                                          MPT2SAS_INTEL_RMS25KB040_BRANDING);
4144                                 break;
4145                         case MPT2SAS_INTEL_RMS25LB040_SSDID:
4146                                 ioc_info(ioc, "%s\n",
4147                                          MPT2SAS_INTEL_RMS25LB040_BRANDING);
4148                                 break;
4149                         case MPT2SAS_INTEL_RMS25LB080_SSDID:
4150                                 ioc_info(ioc, "%s\n",
4151                                          MPT2SAS_INTEL_RMS25LB080_BRANDING);
4152                                 break;
4153                         default:
4154                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4155                                          ioc->pdev->subsystem_device);
4156                                 break;
4157                         }
4158                         break;
4159                 case MPI25_MFGPAGE_DEVID_SAS3008:
4160                         switch (ioc->pdev->subsystem_device) {
4161                         case MPT3SAS_INTEL_RMS3JC080_SSDID:
4162                                 ioc_info(ioc, "%s\n",
4163                                          MPT3SAS_INTEL_RMS3JC080_BRANDING);
4164                                 break;
4165
4166                         case MPT3SAS_INTEL_RS3GC008_SSDID:
4167                                 ioc_info(ioc, "%s\n",
4168                                          MPT3SAS_INTEL_RS3GC008_BRANDING);
4169                                 break;
4170                         case MPT3SAS_INTEL_RS3FC044_SSDID:
4171                                 ioc_info(ioc, "%s\n",
4172                                          MPT3SAS_INTEL_RS3FC044_BRANDING);
4173                                 break;
4174                         case MPT3SAS_INTEL_RS3UC080_SSDID:
4175                                 ioc_info(ioc, "%s\n",
4176                                          MPT3SAS_INTEL_RS3UC080_BRANDING);
4177                                 break;
4178                         default:
4179                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4180                                          ioc->pdev->subsystem_device);
4181                                 break;
4182                         }
4183                         break;
4184                 default:
4185                         ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4186                                  ioc->pdev->subsystem_device);
4187                         break;
4188                 }
4189                 break;
4190         case PCI_VENDOR_ID_DELL:
4191                 switch (ioc->pdev->device) {
4192                 case MPI2_MFGPAGE_DEVID_SAS2008:
4193                         switch (ioc->pdev->subsystem_device) {
4194                         case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4195                                 ioc_info(ioc, "%s\n",
4196                                          MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4197                                 break;
4198                         case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4199                                 ioc_info(ioc, "%s\n",
4200                                          MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4201                                 break;
4202                         case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4203                                 ioc_info(ioc, "%s\n",
4204                                          MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4205                                 break;
4206                         case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4207                                 ioc_info(ioc, "%s\n",
4208                                          MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4209                                 break;
4210                         case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4211                                 ioc_info(ioc, "%s\n",
4212                                          MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4213                                 break;
4214                         case MPT2SAS_DELL_PERC_H200_SSDID:
4215                                 ioc_info(ioc, "%s\n",
4216                                          MPT2SAS_DELL_PERC_H200_BRANDING);
4217                                 break;
4218                         case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4219                                 ioc_info(ioc, "%s\n",
4220                                          MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4221                                 break;
4222                         default:
4223                                 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4224                                          ioc->pdev->subsystem_device);
4225                                 break;
4226                         }
4227                         break;
4228                 case MPI25_MFGPAGE_DEVID_SAS3008:
4229                         switch (ioc->pdev->subsystem_device) {
4230                         case MPT3SAS_DELL_12G_HBA_SSDID:
4231                                 ioc_info(ioc, "%s\n",
4232                                          MPT3SAS_DELL_12G_HBA_BRANDING);
4233                                 break;
4234                         default:
4235                                 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4236                                          ioc->pdev->subsystem_device);
4237                                 break;
4238                         }
4239                         break;
4240                 default:
4241                         ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4242                                  ioc->pdev->subsystem_device);
4243                         break;
4244                 }
4245                 break;
4246         case PCI_VENDOR_ID_CISCO:
4247                 switch (ioc->pdev->device) {
4248                 case MPI25_MFGPAGE_DEVID_SAS3008:
4249                         switch (ioc->pdev->subsystem_device) {
4250                         case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4251                                 ioc_info(ioc, "%s\n",
4252                                          MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4253                                 break;
4254                         case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4255                                 ioc_info(ioc, "%s\n",
4256                                          MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4257                                 break;
4258                         case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4259                                 ioc_info(ioc, "%s\n",
4260                                          MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4261                                 break;
4262                         default:
4263                                 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4264                                          ioc->pdev->subsystem_device);
4265                                 break;
4266                         }
4267                         break;
4268                 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4269                         switch (ioc->pdev->subsystem_device) {
4270                         case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4271                                 ioc_info(ioc, "%s\n",
4272                                          MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4273                                 break;
4274                         case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4275                                 ioc_info(ioc, "%s\n",
4276                                          MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4277                                 break;
4278                         default:
4279                                 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4280                                          ioc->pdev->subsystem_device);
4281                                 break;
4282                         }
4283                         break;
4284                 default:
4285                         ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4286                                  ioc->pdev->subsystem_device);
4287                         break;
4288                 }
4289                 break;
4290         case MPT2SAS_HP_3PAR_SSVID:
4291                 switch (ioc->pdev->device) {
4292                 case MPI2_MFGPAGE_DEVID_SAS2004:
4293                         switch (ioc->pdev->subsystem_device) {
4294                         case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4295                                 ioc_info(ioc, "%s\n",
4296                                          MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4297                                 break;
4298                         default:
4299                                 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4300                                          ioc->pdev->subsystem_device);
4301                                 break;
4302                         }
4303                         break;
4304                 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4305                         switch (ioc->pdev->subsystem_device) {
4306                         case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4307                                 ioc_info(ioc, "%s\n",
4308                                          MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4309                                 break;
4310                         case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4311                                 ioc_info(ioc, "%s\n",
4312                                          MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4313                                 break;
4314                         case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4315                                 ioc_info(ioc, "%s\n",
4316                                          MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4317                                 break;
4318                         case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4319                                 ioc_info(ioc, "%s\n",
4320                                          MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4321                                 break;
4322                         default:
4323                                 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4324                                          ioc->pdev->subsystem_device);
4325                                 break;
4326                         }
4327                         break;
4328                 default:
4329                         ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4330                                  ioc->pdev->subsystem_device);
4331                         break;
4332                 }
		break;
4333         default:
4334                 break;
4335         }
4336 }
4337
4338 /**
4339  * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4340  *                              version from FW Image Header.
4341  * @ioc: per adapter object
4342  *
4343  * Return: 0 for success, non-zero for failure.
4344  */
4345 static int
4346 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4347 {
4348         Mpi2FWImageHeader_t *fw_img_hdr;
4349         Mpi26ComponentImageHeader_t *cmp_img_hdr;
4350         Mpi25FWUploadRequest_t *mpi_request;
4351         Mpi2FWUploadReply_t mpi_reply;
4352         int r = 0;
4353         u32  package_version = 0;
4354         void *fwpkg_data = NULL;
4355         dma_addr_t fwpkg_data_dma;
4356         u16 smid, ioc_status;
4357         size_t data_length;
4358
4359         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4360
4361         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4362                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4363                 return -EAGAIN;
4364         }
4365
4366         data_length = sizeof(Mpi2FWImageHeader_t);
4367         fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4368                         &fwpkg_data_dma, GFP_KERNEL);
4369         if (!fwpkg_data) {
4370                 ioc_err(ioc,
4371                     "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4372                         __FILE__, __LINE__, __func__);
4373                 return -ENOMEM;
4374         }
4375
4376         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4377         if (!smid) {
4378                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4379                 r = -EAGAIN;
4380                 goto out;
4381         }
4382
4383         ioc->base_cmds.status = MPT3_CMD_PENDING;
4384         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4385         ioc->base_cmds.smid = smid;
4386         memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4387         mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4388         mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4389         mpi_request->ImageSize = cpu_to_le32(data_length);
4390         ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4391                         data_length);
4392         init_completion(&ioc->base_cmds.done);
4393         ioc->put_smid_default(ioc, smid);
4394         /* Wait for 15 seconds */
4395         wait_for_completion_timeout(&ioc->base_cmds.done,
4396                         FW_IMG_HDR_READ_TIMEOUT*HZ);
4397         ioc_info(ioc, "%s: complete\n", __func__);
4398         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4399                 ioc_err(ioc, "%s: timeout\n", __func__);
4400                 _debug_dump_mf(mpi_request,
4401                                 sizeof(Mpi25FWUploadRequest_t)/4);
4402                 r = -ETIME;
4403         } else {
4404                 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4405                 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4406                         memcpy(&mpi_reply, ioc->base_cmds.reply,
4407                                         sizeof(Mpi2FWUploadReply_t));
4408                         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4409                                                 MPI2_IOCSTATUS_MASK;
4410                         if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4411                                 fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4412                                 if (le32_to_cpu(fw_img_hdr->Signature) ==
4413                                     MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4414                                         cmp_img_hdr =
4415                                             (Mpi26ComponentImageHeader_t *)
4416                                             (fwpkg_data);
4417                                         package_version =
4418                                             le32_to_cpu(
4419                                             cmp_img_hdr->ApplicationSpecific);
4420                                 } else
4421                                         package_version =
4422                                             le32_to_cpu(
4423                                             fw_img_hdr->PackageVersion.Word);
4424                                 if (package_version)
4425                                         ioc_info(ioc,
4426                                         "FW Package Ver(%02d.%02d.%02d.%02d)\n",
4427                                         ((package_version) & 0xFF000000) >> 24,
4428                                         ((package_version) & 0x00FF0000) >> 16,
4429                                         ((package_version) & 0x0000FF00) >> 8,
4430                                         (package_version) & 0x000000FF);
4431                         } else {
4432                                 _debug_dump_mf(&mpi_reply,
4433                                                 sizeof(Mpi2FWUploadReply_t)/4);
4434                         }
4435                 }
4436         }
4437         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4438 out:
4439         if (fwpkg_data)
4440                 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4441                                 fwpkg_data_dma);
4442         return r;
4443 }
4444
4445 /**
4446  * _base_display_ioc_capabilities - Display IOC's capabilities.
4447  * @ioc: per adapter object
4448  */
4449 static void
4450 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4451 {
4452         int i = 0;
4453         char desc[16];
4454         u32 iounit_pg1_flags;
4455         u32 bios_version;
4456
4457         bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4458         strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc));
4459         ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4460                  desc,
4461                  (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4462                  (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4463                  (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4464                  ioc->facts.FWVersion.Word & 0x000000FF,
4465                  ioc->pdev->revision,
4466                  (bios_version & 0xFF000000) >> 24,
4467                  (bios_version & 0x00FF0000) >> 16,
4468                  (bios_version & 0x0000FF00) >> 8,
4469                  bios_version & 0x000000FF);
4470
4471         _base_display_OEMs_branding(ioc);
4472
4473         ioc_info(ioc, "Protocol=(");
4474
4475         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4476                 pr_cont("Initiator");
4477                 i++;
4478         }
4479
4480         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4481                 pr_cont("%sTarget", i ? "," : "");
4482                 i++;
4483         }
4484
4485         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4486                 pr_cont("%sNVMe", i ? "," : "");
4487                 i++;
4488         }
4489
4490         i = 0;
4491         pr_cont("), Capabilities=(");
4492
4493         if (!ioc->hide_ir_msg) {
4494                 if (ioc->facts.IOCCapabilities &
4495                     MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4496                         pr_cont("Raid");
4497                         i++;
4498                 }
4499         }
4500
4501         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4502                 pr_cont("%sTLR", i ? "," : "");
4503                 i++;
4504         }
4505
4506         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4507                 pr_cont("%sMulticast", i ? "," : "");
4508                 i++;
4509         }
4510
4511         if (ioc->facts.IOCCapabilities &
4512             MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4513                 pr_cont("%sBIDI Target", i ? "," : "");
4514                 i++;
4515         }
4516
4517         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4518                 pr_cont("%sEEDP", i ? "," : "");
4519                 i++;
4520         }
4521
4522         if (ioc->facts.IOCCapabilities &
4523             MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4524                 pr_cont("%sSnapshot Buffer", i ? "," : "");
4525                 i++;
4526         }
4527
4528         if (ioc->facts.IOCCapabilities &
4529             MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4530                 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4531                 i++;
4532         }
4533
4534         if (ioc->facts.IOCCapabilities &
4535             MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4536                 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4537                 i++;
4538         }
4539
4540         if (ioc->facts.IOCCapabilities &
4541             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4542                 pr_cont("%sTask Set Full", i ? "," : "");
4543                 i++;
4544         }
4545
4546         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4547         if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4548                 pr_cont("%sNCQ", i ? "," : "");
4549                 i++;
4550         }
4551
4552         pr_cont(")\n");
4553 }
4554
4555 /**
4556  * mpt3sas_base_update_missing_delay - change the missing delay timers
4557  * @ioc: per adapter object
4558  * @device_missing_delay: seconds before a removed device is reported missing
4559  * @io_missing_delay: seconds before outstanding IO to a missing device is
4560  *	returned
4561  *
4562  * Using values passed on the command line, this function modifies the device
4563  * missing delay and the IO missing delay. It should be called at driver load time.
4564  */
4565 void
4566 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4567         u16 device_missing_delay, u8 io_missing_delay)
4568 {
4569         u16 dmd, dmd_new, dmd_original;
4570         u8 io_missing_delay_original;
4571         u16 sz;
4572         Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4573         Mpi2ConfigReply_t mpi_reply;
4574         u8 num_phys = 0;
4575         u16 ioc_status;
4576
4577         mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4578         if (!num_phys)
4579                 return;
4580
4581         sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4582             sizeof(Mpi2SasIOUnit1PhyData_t));
4583         sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4584         if (!sas_iounit_pg1) {
4585                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4586                         __FILE__, __LINE__, __func__);
4587                 goto out;
4588         }
4589         if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4590             sas_iounit_pg1, sz))) {
4591                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4592                         __FILE__, __LINE__, __func__);
4593                 goto out;
4594         }
4595         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4596             MPI2_IOCSTATUS_MASK;
4597         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4598                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4599                         __FILE__, __LINE__, __func__);
4600                 goto out;
4601         }
4602
4603         /* device missing delay */
4604         dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4605         if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4606                 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4607         else
4608                 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4609         dmd_original = dmd;
4610         if (device_missing_delay > 0x7F) {
4611                 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4612                     device_missing_delay;
4613                 dmd = dmd / 16;
4614                 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4615         } else
4616                 dmd = device_missing_delay;
4617         sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4618
4619         /* io missing delay */
4620         io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4621         sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4622
4623         if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4624             sz)) {
4625                 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4626                         dmd_new = (dmd &
4627                             MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4628                 else
4629                         dmd_new =
4630                     dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4631                 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4632                          dmd_original, dmd_new);
4633                 ioc_info(ioc, "io_missing_delay: old(%d), new(%d)\n",
4634                          io_missing_delay_original,
4635                          io_missing_delay);
4636                 ioc->device_missing_delay = dmd_new;
4637                 ioc->io_missing_delay = io_missing_delay;
4638         }
4639
4640 out:
4641         kfree(sas_iounit_pg1);
4642 }
4643
4644 /**
4645  * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4646  *    according to performance mode.
4647  * @ioc: per adapter object
4648  *
4649  * Return: nothing.
4650  */
4651 static void
4652 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4653 {
4654         Mpi2IOCPage1_t ioc_pg1;
4655         Mpi2ConfigReply_t mpi_reply;
4656
4657         mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4658         memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4659
4660         switch (perf_mode) {
4661         case MPT_PERF_MODE_DEFAULT:
4662         case MPT_PERF_MODE_BALANCED:
4663                 if (ioc->high_iops_queues) {
4664                         ioc_info(ioc,
4665                                 "Enable interrupt coalescing only for first %d reply queues\n",
4666                                 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4667                         /*
4668                          * If bit 31 is zero then interrupt coalescing is
4669                          * enabled for all reply descriptor post queues.
4670                          * If bit 31 is set then interrupt coalescing can be
4671                          * enabled/disabled on a per reply descriptor post
4672                          * queue group (of 8) basis. So to enable interrupt
4673                          * coalescing only on the first reply descriptor
4674                          * post queue group, both bit 31 and bit 0 are
4675                          * set.
4676                          */
4677                         ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4678                             ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4679                         mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4680                         ioc_info(ioc, "performance mode: balanced\n");
4681                         return;
4682                 }
4683                 /* Fall through */
4684         case MPT_PERF_MODE_LATENCY:
4685                 /*
4686                  * Enable interrupt coalescing on all reply queues
4687                  * with timeout value 0xA
4688                  */
4689                 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4690                 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4691                 ioc_pg1.ProductSpecific = 0;
4692                 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4693                 ioc_info(ioc, "performance mode: latency\n");
4694                 break;
4695         case MPT_PERF_MODE_IOPS:
4696                 /*
4697                  * Enable interrupt coalescing on all reply queues.
4698                  */
4699                 ioc_info(ioc,
4700                     "performance mode: iops with coalescing timeout: 0x%x\n",
4701                     le32_to_cpu(ioc_pg1.CoalescingTimeout));
4702                 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4703                 ioc_pg1.ProductSpecific = 0;
4704                 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4705                 break;
4706         }
4707 }
4708
4709 /**
4710  * _base_static_config_pages - static start of day config pages
4711  * @ioc: per adapter object
4712  */
4713 static void
4714 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4715 {
4716         Mpi2ConfigReply_t mpi_reply;
4717         u32 iounit_pg1_flags;
4718
4719         ioc->nvme_abort_timeout = 30;
4720         mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4721         if (ioc->ir_firmware)
4722                 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4723                     &ioc->manu_pg10);
4724
4725         /*
4726          * Ensure correct T10 PI operation if vendor left EEDPTagMode
4727          * flag unset in NVDATA.
4728          */
4729         mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4730         if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
4731                 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4732                     ioc->name);
4733                 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4734                 ioc->manu_pg11.EEDPTagMode |= 0x1;
4735                 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4736                     &ioc->manu_pg11);
4737         }
4738         if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4739                 ioc->tm_custom_handling = 1;
4740         else {
4741                 ioc->tm_custom_handling = 0;
4742                 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4743                         ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4744                 else if (ioc->manu_pg11.NVMeAbortTO >
4745                                         NVME_TASK_ABORT_MAX_TIMEOUT)
4746                         ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4747                 else
4748                         ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4749         }
4750
4751         mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4752         mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4753         mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4754         mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4755         mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4756         mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4757         _base_display_ioc_capabilities(ioc);
4758
4759         /*
4760          * Enable task_set_full handling in iounit_pg1 when the
4761          * facts capabilities indicate that its supported.
4762          */
4763         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4764         if ((ioc->facts.IOCCapabilities &
4765             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4766                 iounit_pg1_flags &=
4767                     ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4768         else
4769                 iounit_pg1_flags |=
4770                     MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4771         ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4772         mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4773
4774         if (ioc->iounit_pg8.NumSensors)
4775                 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4776         if (ioc->is_aero_ioc)
4777                 _base_update_ioc_page1_inlinewith_perf_mode(ioc);
4778 }
4779
4780 /**
4781  * mpt3sas_free_enclosure_list - release memory
4782  * @ioc: per adapter object
4783  *
4784  * Free memory allocated during enclosure add.
4785  */
4786 void
4787 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4788 {
4789         struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4790
4791         /* Free enclosure list */
4792         list_for_each_entry_safe(enclosure_dev,
4793                         enclosure_dev_next, &ioc->enclosure_list, list) {
4794                 list_del(&enclosure_dev->list);
4795                 kfree(enclosure_dev);
4796         }
4797 }
4798
4799 /**
4800  * _base_release_memory_pools - release memory
4801  * @ioc: per adapter object
4802  *
4803  * Free memory allocated from _base_allocate_memory_pools.
4804  */
4805 static void
4806 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4807 {
4808         int i = 0;
4809         int j = 0;
4810         int dma_alloc_count = 0;
4811         struct chain_tracker *ct;
4812         int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4813
4814         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4815
4816         if (ioc->request) {
4817                 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
4818                     ioc->request,  ioc->request_dma);
4819                 dexitprintk(ioc,
4820                             ioc_info(ioc, "request_pool(0x%p): free\n",
4821                                      ioc->request));
4822                 ioc->request = NULL;
4823         }
4824
4825         if (ioc->sense) {
4826                 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4827                 dma_pool_destroy(ioc->sense_dma_pool);
4828                 dexitprintk(ioc,
4829                             ioc_info(ioc, "sense_pool(0x%p): free\n",
4830                                      ioc->sense));
4831                 ioc->sense = NULL;
4832         }
4833
4834         if (ioc->reply) {
4835                 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4836                 dma_pool_destroy(ioc->reply_dma_pool);
4837                 dexitprintk(ioc,
4838                             ioc_info(ioc, "reply_pool(0x%p): free\n",
4839                                      ioc->reply));
4840                 ioc->reply = NULL;
4841         }
4842
4843         if (ioc->reply_free) {
4844                 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4845                     ioc->reply_free_dma);
4846                 dma_pool_destroy(ioc->reply_free_dma_pool);
4847                 dexitprintk(ioc,
4848                             ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4849                                      ioc->reply_free));
4850                 ioc->reply_free = NULL;
4851         }
4852
4853         if (ioc->reply_post) {
4854                 dma_alloc_count = DIV_ROUND_UP(count,
4855                                 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4856                 for (i = 0; i < count; i++) {
4857                         if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
4858                             && dma_alloc_count) {
4859                                 if (ioc->reply_post[i].reply_post_free) {
4860                                         dma_pool_free(
4861                                             ioc->reply_post_free_dma_pool,
4862                                             ioc->reply_post[i].reply_post_free,
4863                                         ioc->reply_post[i].reply_post_free_dma);
4864                                         dexitprintk(ioc, ioc_info(ioc,
4865                                            "reply_post_free_pool(0x%p): free\n",
4866                                            ioc->reply_post[i].reply_post_free));
4867                                         ioc->reply_post[i].reply_post_free =
4868                                                                         NULL;
4869                                 }
4870                                 --dma_alloc_count;
4871                         }
4872                 }
4873                 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4874                 if (ioc->reply_post_free_array &&
4875                         ioc->rdpq_array_enable) {
4876                         dma_pool_free(ioc->reply_post_free_array_dma_pool,
4877                             ioc->reply_post_free_array,
4878                             ioc->reply_post_free_array_dma);
4879                         ioc->reply_post_free_array = NULL;
4880                 }
4881                 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4882                 kfree(ioc->reply_post);
4883         }
4884
4885         if (ioc->pcie_sgl_dma_pool) {
4886                 for (i = 0; i < ioc->scsiio_depth; i++) {
4887                         dma_pool_free(ioc->pcie_sgl_dma_pool,
4888                                         ioc->pcie_sg_lookup[i].pcie_sgl,
4889                                         ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4890                 }
4891                 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4892         }
4893
4894         if (ioc->config_page) {
4895                 dexitprintk(ioc,
4896                             ioc_info(ioc, "config_page(0x%p): free\n",
4897                                      ioc->config_page));
4898                 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
4899                     ioc->config_page, ioc->config_page_dma);
4900         }
4901
4902         kfree(ioc->hpr_lookup);
4903         ioc->hpr_lookup = NULL;
4904         kfree(ioc->internal_lookup);
4905         ioc->internal_lookup = NULL;
4906         if (ioc->chain_lookup) {
4907                 for (i = 0; i < ioc->scsiio_depth; i++) {
4908                         for (j = ioc->chains_per_prp_buffer;
4909                             j < ioc->chains_needed_per_io; j++) {
4910                                 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4911                                 if (ct && ct->chain_buffer)
4912                                         dma_pool_free(ioc->chain_dma_pool,
4913                                                 ct->chain_buffer,
4914                                                 ct->chain_buffer_dma);
4915                         }
4916                         kfree(ioc->chain_lookup[i].chains_per_smid);
4917                 }
4918                 dma_pool_destroy(ioc->chain_dma_pool);
4919                 kfree(ioc->chain_lookup);
4920                 ioc->chain_lookup = NULL;
4921         }
4922 }
4923
4924 /**
4925  * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set
4926  *      have the same upper 32 bits in their base memory address.
4927  * @reply_pool_start_address: Base address of a reply queue set
4928  * @pool_sz: Size of a single Reply Descriptor Post Queue pool
4929  *
4930  * Return: 1 if the reply queues in a set have the same upper 32 bits in their
4931  * base memory address, else 0.
4932  */
4934 static int
4935 mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
4936 {
4937         long reply_pool_end_address;
4938
4939         reply_pool_end_address = reply_pool_start_address + pool_sz;
4940
4941         if (upper_32_bits(reply_pool_start_address) ==
4942                 upper_32_bits(reply_pool_end_address))
4943                 return 1;
4944         else
4945                 return 0;
4946 }
4947
4948 /**
4949  * base_alloc_rdpq_dma_pool - Allocate DMA'able memory
4950  *                     for reply queues.
4951  * @ioc: per adapter object
4952  * @sz: DMA Pool size
4953  * Return: 0 for success, non-zero for failure.
4954  */
4955 static int
4956 base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
4957 {
4958         int i = 0;
4959         u32 dma_alloc_count = 0;
4960         int reply_post_free_sz = ioc->reply_post_queue_depth *
4961                 sizeof(Mpi2DefaultReplyDescriptor_t);
4962         int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4963
4964         ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
4965                         GFP_KERNEL);
4966         if (!ioc->reply_post)
4967                 return -ENOMEM;
4968         /*
4969          * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and for
4970          * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..) must be
4971          * within a 4GB boundary, i.e. the reply queues in a set must have the
4972          * same upper 32 bits in their memory address. So here the driver
4973          * allocates the DMA'able memory for the reply queues accordingly.
4974          * The driver applies the VENTURA_SERIES limitation to INVADER_SERIES
4975          * as well.
4976          */
4977         dma_alloc_count = DIV_ROUND_UP(count,
4978                                 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4979         ioc->reply_post_free_dma_pool =
4980                 dma_pool_create("reply_post_free pool",
4981                     &ioc->pdev->dev, sz, 16, 0);
4982         if (!ioc->reply_post_free_dma_pool)
4983                 return -ENOMEM;
4984         for (i = 0; i < count; i++) {
4985                 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
4986                         ioc->reply_post[i].reply_post_free =
4987                             dma_pool_zalloc(ioc->reply_post_free_dma_pool,
4988                                 GFP_KERNEL,
4989                                 &ioc->reply_post[i].reply_post_free_dma);
4990                         if (!ioc->reply_post[i].reply_post_free)
4991                                 return -ENOMEM;
4992                         /*
4993                          * Each set of RDPQ pool must satisfy 4gb boundary
4994                          * restriction.
4995                          * 1) Check if allocated resources for RDPQ pool are in
4996                          *      the same 4GB range.
4997                          * 2) If #1 is true, continue with 64 bit DMA.
4998                          * 3) If #1 is false, return 1. which means free all the
4999                          * resources and set DMA mask to 32 and allocate.
5000                          */
5001                         if (!mpt3sas_check_same_4gb_region(
5002                                 (long)ioc->reply_post[i].reply_post_free, sz)) {
5003                                 dinitprintk(ioc,
5004                                     ioc_err(ioc, "bad Replypost free pool(0x%p) "
5005                                     "reply_post_free_dma = (0x%llx)\n",
5006                                     ioc->reply_post[i].reply_post_free,
5007                                     (unsigned long long)
5008                                     ioc->reply_post[i].reply_post_free_dma));
5009                                 return -EAGAIN;
5010                         }
5011                         dma_alloc_count--;
5012
5013                 } else {
5014                         ioc->reply_post[i].reply_post_free =
5015                             (Mpi2ReplyDescriptorsUnion_t *)
5016                             ((long)ioc->reply_post[i-1].reply_post_free
5017                             + reply_post_free_sz);
5018                         ioc->reply_post[i].reply_post_free_dma =
5019                             (dma_addr_t)
5020                             (ioc->reply_post[i-1].reply_post_free_dma +
5021                             reply_post_free_sz);
5022                 }
5023         }
5024         return 0;
5025 }
5026
5027 /**
5028  * _base_allocate_memory_pools - allocate start of day memory pools
5029  * @ioc: per adapter object
5030  *
5031  * Return: 0 success, anything else error.
5032  */
5033 static int
5034 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5035 {
5036         struct mpt3sas_facts *facts;
5037         u16 max_sge_elements;
5038         u16 chains_needed_per_io;
5039         u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
5040         u32 retry_sz;
5041         u32 rdpq_sz = 0;
5042         u16 max_request_credit, nvme_blocks_needed;
5043         unsigned short sg_tablesize;
5044         u16 sge_size;
5045         int i, j;
5046         int ret = 0;
5047         struct chain_tracker *ct;
5048
5049         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5050
5052         retry_sz = 0;
5053         facts = &ioc->facts;
5054
5055         /* command line tunables for max sgl entries */
5056         if (max_sgl_entries != -1)
5057                 sg_tablesize = max_sgl_entries;
5058         else {
5059                 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
5060                         sg_tablesize = MPT2SAS_SG_DEPTH;
5061                 else
5062                         sg_tablesize = MPT3SAS_SG_DEPTH;
5063         }
5064
5065         /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
5066         if (reset_devices)
5067                 sg_tablesize = min_t(unsigned short, sg_tablesize,
5068                    MPT_KDUMP_MIN_PHYS_SEGMENTS);
5069
5070         if (ioc->is_mcpu_endpoint)
5071                 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5072         else {
5073                 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
5074                         sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5075                 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
5076                         sg_tablesize = min_t(unsigned short, sg_tablesize,
5077                                         SG_MAX_SEGMENTS);
5078                         ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
5079                                  sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
5080                 }
5081                 ioc->shost->sg_tablesize = sg_tablesize;
5082         }
5083
5084         ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
5085                 (facts->RequestCredit / 4));
5086         if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
5087                 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
5088                                 INTERNAL_SCSIIO_CMDS_COUNT)) {
5089                         ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
5090                                 facts->RequestCredit);
5091                         return -ENOMEM;
5092                 }
5093                 ioc->internal_depth = 10;
5094         }
5095
5096         ioc->hi_priority_depth = ioc->internal_depth - (5);
5097         /* command line tunables  for max controller queue depth */
5098         if (max_queue_depth != -1 && max_queue_depth != 0) {
5099                 max_request_credit = min_t(u16, max_queue_depth +
5100                         ioc->internal_depth, facts->RequestCredit);
5101                 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
5102                         max_request_credit =  MAX_HBA_QUEUE_DEPTH;
5103         } else if (reset_devices)
5104                 max_request_credit = min_t(u16, facts->RequestCredit,
5105                     (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
5106         else
5107                 max_request_credit = min_t(u16, facts->RequestCredit,
5108                     MAX_HBA_QUEUE_DEPTH);
5109
5110         /* Firmware maintains an additional facts->HighPriorityCredit number of
5111          * credits for High Priority Request messages, so the hba queue depth is
5112          * the sum of max_request_credit and the high priority queue depth.
5113          */
5114         ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
5115
5116         /* request frame size */
5117         ioc->request_sz = facts->IOCRequestFrameSize * 4;
5118
5119         /* reply frame size */
5120         ioc->reply_sz = facts->ReplyFrameSize * 4;
5121
5122         /* chain segment size */
5123         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5124                 if (facts->IOCMaxChainSegmentSize)
5125                         ioc->chain_segment_sz =
5126                                         facts->IOCMaxChainSegmentSize *
5127                                         MAX_CHAIN_ELEMT_SZ;
5128                 else
5129                 /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
5130                         ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
5131                                                     MAX_CHAIN_ELEMT_SZ;
5132         } else
5133                 ioc->chain_segment_sz = ioc->request_sz;
5134
5135         /* calculate the max scatter element size */
5136         sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
5137
5138  retry_allocation:
5139         total_sz = 0;
5140         /* calculate number of sg elements left over in the 1st frame */
5141         max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
5142             sizeof(Mpi2SGEIOUnion_t)) + sge_size);
5143         ioc->max_sges_in_main_message = max_sge_elements/sge_size;
5144
5145         /* now do the same for a chain buffer */
5146         max_sge_elements = ioc->chain_segment_sz - sge_size;
5147         ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
5148
5149         /*
5150          *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
5151          */
5152         chains_needed_per_io = ((ioc->shost->sg_tablesize -
5153            ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
5154             + 1;
5155         if (chains_needed_per_io > facts->MaxChainDepth) {
5156                 chains_needed_per_io = facts->MaxChainDepth;
5157                 ioc->shost->sg_tablesize = min_t(u16,
5158                 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
5159                 * chains_needed_per_io), ioc->shost->sg_tablesize);
5160         }
5161         ioc->chains_needed_per_io = chains_needed_per_io;
5162
5163         /* reply free queue sizing - taking into account for 64 FW events */
5164         ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5165
5166         /* mCPU manage single counters for simplicity */
5167         if (ioc->is_mcpu_endpoint)
5168                 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
5169         else {
5170                 /* calculate reply descriptor post queue depth */
5171                 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
5172                         ioc->reply_free_queue_depth +  1;
5173                 /* align the reply post queue on the next 16 count boundary */
5174                 if (ioc->reply_post_queue_depth % 16)
5175                         ioc->reply_post_queue_depth += 16 -
5176                                 (ioc->reply_post_queue_depth % 16);
5177         }
5178
5179         if (ioc->reply_post_queue_depth >
5180             facts->MaxReplyDescriptorPostQueueDepth) {
5181                 ioc->reply_post_queue_depth =
5182                                 facts->MaxReplyDescriptorPostQueueDepth -
5183                     (facts->MaxReplyDescriptorPostQueueDepth % 16);
5184                 ioc->hba_queue_depth =
5185                                 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
5186                 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5187         }
5188
5189         ioc_info(ioc,
5190             "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
5191             "sge_per_io(%d), chains_per_io(%d)\n",
5192             ioc->max_sges_in_main_message,
5193             ioc->max_sges_in_chain_message,
5194             ioc->shost->sg_tablesize,
5195             ioc->chains_needed_per_io);
5196
5197         /* reply post queue, 16 byte align */
5198         reply_post_free_sz = ioc->reply_post_queue_depth *
5199             sizeof(Mpi2DefaultReplyDescriptor_t);
5200         rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
5201         if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
5202                 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
5203         ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
5204         if (ret == -EAGAIN) {
5205                 /*
5206                  * Free allocated bad RDPQ memory pools.
5207                  * Change dma coherent mask to 32 bit and reallocate RDPQ
5208                  */
5209                 _base_release_memory_pools(ioc);
5210                 ioc->use_32bit_dma = true;
5211                 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
5212                         ioc_err(ioc,
5213                             "32 DMA mask failed %s\n", pci_name(ioc->pdev));
5214                         return -ENODEV;
5215                 }
5216                 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
5217                         return -ENOMEM;
5218         } else if (ret == -ENOMEM)
5219                 return -ENOMEM;
5220         total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
5221             DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
5222         ioc->scsiio_depth = ioc->hba_queue_depth -
5223             ioc->hi_priority_depth - ioc->internal_depth;
5224
5225         /* set the scsi host can_queue depth
5226          * with some internal commands that could be outstanding
5227          */
5228         ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
5229         dinitprintk(ioc,
5230                     ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
5231                              ioc->shost->can_queue));
5232
5233         /* contiguous pool for request and chains, 16 byte align, one extra
5234          * frame for smid=0
5235          */
5236         ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
5237         sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
5238
5239         /* hi-priority queue */
5240         sz += (ioc->hi_priority_depth * ioc->request_sz);
5241
5242         /* internal queue */
5243         sz += (ioc->internal_depth * ioc->request_sz);
5244
5245         ioc->request_dma_sz = sz;
5246         ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
5247                         &ioc->request_dma, GFP_KERNEL);
5248         if (!ioc->request) {
5249                 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
5250                         ioc->hba_queue_depth, ioc->chains_needed_per_io,
5251                         ioc->request_sz, sz / 1024);
5252                 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
5253                         goto out;
5254                 retry_sz = 64;
5255                 ioc->hba_queue_depth -= retry_sz;
5256                 _base_release_memory_pools(ioc);
5257                 goto retry_allocation;
5258         }
5259         memset(ioc->request, 0, sz);
5260
5261         if (retry_sz)
5262                 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
5263                         ioc->hba_queue_depth, ioc->chains_needed_per_io,
5264                         ioc->request_sz, sz / 1024);
5265
5266         /* hi-priority queue */
5267         ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
5268             ioc->request_sz);
5269         ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
5270             ioc->request_sz);
5271
5272         /* internal queue */
5273         ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
5274             ioc->request_sz);
5275         ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
5276             ioc->request_sz);
5277
5278         ioc_info(ioc,
5279             "request pool(0x%p) - dma(0x%llx): "
5280             "depth(%d), frame_size(%d), pool_size(%d kB)\n",
5281             ioc->request, (unsigned long long) ioc->request_dma,
5282             ioc->hba_queue_depth, ioc->request_sz,
5283             (ioc->hba_queue_depth * ioc->request_sz) / 1024);
5284
5285         total_sz += sz;
5286
5287         dinitprintk(ioc,
5288                     ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
5289                              ioc->request, ioc->scsiio_depth));
5290
5291         ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
5292         sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
5293         ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
5294         if (!ioc->chain_lookup) {
5295                 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5296                 goto out;
5297         }
5298
5299         sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
5300         for (i = 0; i < ioc->scsiio_depth; i++) {
5301                 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
5302                 if (!ioc->chain_lookup[i].chains_per_smid) {
5303                         ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5304                         goto out;
5305                 }
5306         }
5307
5308         /* initialize hi-priority queue smid's */
5309         ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
5310             sizeof(struct request_tracker), GFP_KERNEL);
5311         if (!ioc->hpr_lookup) {
5312                 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
5313                 goto out;
5314         }
5315         ioc->hi_priority_smid = ioc->scsiio_depth + 1;
5316         dinitprintk(ioc,
5317                     ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
5318                              ioc->hi_priority,
5319                              ioc->hi_priority_depth, ioc->hi_priority_smid));
5320
5321         /* initialize internal queue smid's */
5322         ioc->internal_lookup = kcalloc(ioc->internal_depth,
5323             sizeof(struct request_tracker), GFP_KERNEL);
5324         if (!ioc->internal_lookup) {
5325                 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
5326                 goto out;
5327         }
5328         ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
5329         dinitprintk(ioc,
5330                     ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
5331                              ioc->internal,
5332                              ioc->internal_depth, ioc->internal_smid));
5333         /*
5334          * The number of NVMe page sized blocks needed is:
5335          *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
5336          * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
5337          * that is placed in the main message frame.  8 is the size of each PRP
5338          * entry or PRP list pointer entry.  8 is subtracted from page_size
5339          * because of the PRP list pointer entry at the end of a page, so this
5340          * is not counted as a PRP entry.  The 1 added page is a round up.
5341          *
5342          * To avoid allocation failures due to the amount of memory that could
5343          * be required for NVMe PRP's, only each set of NVMe blocks will be
5344          * contiguous, so a new set is allocated for each possible I/O.
5345          */
5346         ioc->chains_per_prp_buffer = 0;
5347         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
5348                 nvme_blocks_needed =
5349                         (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
5350                 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
5351                 nvme_blocks_needed++;
5352
5353                 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
5354                 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
5355                 if (!ioc->pcie_sg_lookup) {
5356                         ioc_err(ioc, "PCIe SGL lookup: kzalloc failed\n");
5357                         goto out;
5358                 }
5359                 sz = nvme_blocks_needed * ioc->page_size;
5360                 ioc->pcie_sgl_dma_pool =
5361                         dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
5362                 if (!ioc->pcie_sgl_dma_pool) {
5363                         ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5364                         goto out;
5365                 }
5366
5367                 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5368                 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
5369                                                 ioc->chains_needed_per_io);
5370
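                /* carve chain frames out of each PRP page; chains that do
                 * not fit are allocated from the separate chain pool below
                 */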
5371                 for (i = 0; i < ioc->scsiio_depth; i++) {
5372                         ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
5373                                 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5374                                 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5375                         if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5376                                 ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5377                                 goto out;
5378                         }
5379                         for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5380                                 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5381                                 ct->chain_buffer =
5382                                     ioc->pcie_sg_lookup[i].pcie_sgl +
5383                                     (j * ioc->chain_segment_sz);
5384                                 ct->chain_buffer_dma =
5385                                     ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5386                                     (j * ioc->chain_segment_sz);
5387                         }
5388                 }
5389
5390                 dinitprintk(ioc,
5391                             ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5392                                      ioc->scsiio_depth, sz,
5393                                      (sz * ioc->scsiio_depth) / 1024));
5394                 dinitprintk(ioc,
5395                             ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
5396                                      ioc->chains_per_prp_buffer));
5397                 total_sz += sz * ioc->scsiio_depth;
5398         }
5399
5400         ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5401             ioc->chain_segment_sz, 16, 0);
5402         if (!ioc->chain_dma_pool) {
5403                 ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
5404                 goto out;
5405         }
5406         for (i = 0; i < ioc->scsiio_depth; i++) {
5407                 for (j = ioc->chains_per_prp_buffer;
5408                                 j < ioc->chains_needed_per_io; j++) {
5409                         ct = &ioc->chain_lookup[i].chains_per_smid[j];
5410                         ct->chain_buffer = dma_pool_alloc(
5411                                         ioc->chain_dma_pool, GFP_KERNEL,
5412                                         &ct->chain_buffer_dma);
5413                         if (!ct->chain_buffer) {
5414                                 ioc_err(ioc, "chain_lookup: dma_pool_alloc failed\n");
5415                                 goto out;
5416                         }
5417                 }
5418                 total_sz += ioc->chain_segment_sz;
5419         }
5420
5421         dinitprintk(ioc,
5422                     ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
5423                              ioc->chain_depth, ioc->chain_segment_sz,
5424                              (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
5425
5426         /* sense buffers, 4 byte align */
5427         sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
5428         ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5429                                               4, 0);
5430         if (!ioc->sense_dma_pool) {
5431                 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5432                 goto out;
5433         }
5434         ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5435             &ioc->sense_dma);
5436         if (!ioc->sense) {
5437                 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5438                 goto out;
5439         }
5440         /* The sense buffer must reside within a single 4 GB region.
5441          * The function below checks exactly that.
5442          * On failure, a new DMA pool is created with an updated
5443          * alignment, and the older allocation and pool are destroyed.
5444          * The alignment is chosen such that the next allocation, if it
5445          * succeeds, always meets the same 4 GB region requirement.
5446          * The actual requirement is not alignment as such: the start and
5447          * end of the DMA buffer must share the same upper 32 address bits.
5448          */
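        /*
         * Illustrative example (hypothetical addresses): a buffer at
         * dma 0xFFFFF000 with sz 0x2000 would end at 0x100001000, so the
         * upper 32 bits of start (0x0) and end (0x1) differ and the pool
         * must be rebuilt.  Aligning to roundup_pow_of_two(sz) guarantees
         * a buffer of size sz cannot straddle a 4 GB boundary.
         */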
5449         if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
5450                 /* release sense pool and reallocate */
5451                 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5452                 dma_pool_destroy(ioc->sense_dma_pool);
5453                 ioc->sense = NULL;
5454
5455                 ioc->sense_dma_pool =
5456                         dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5457                                                 roundup_pow_of_two(sz), 0);
5458                 if (!ioc->sense_dma_pool) {
5459                         ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5460                         goto out;
5461                 }
5462                 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5463                                 &ioc->sense_dma);
5464                 if (!ioc->sense) {
5465                         ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5466                         goto out;
5467                 }
5468         }
5469         ioc_info(ioc,
5470             "sense pool(0x%p) - dma(0x%llx): depth(%d), "
5471             "element_size(%d), pool_size(%d kB)\n",
5472             ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
5473             SCSI_SENSE_BUFFERSIZE, sz / 1024);
5474
5475         total_sz += sz;
5476
5477         /* reply pool, 4 byte align */
5478         sz = ioc->reply_free_queue_depth * ioc->reply_sz;
5479         ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
5480                                               4, 0);
5481         if (!ioc->reply_dma_pool) {
5482                 ioc_err(ioc, "reply pool: dma_pool_create failed\n");
5483                 goto out;
5484         }
5485         ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5486             &ioc->reply_dma);
5487         if (!ioc->reply) {
5488                 ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
5489                 goto out;
5490         }
5491         ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5492         ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5493         dinitprintk(ioc,
5494                     ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5495                              ioc->reply, ioc->reply_free_queue_depth,
5496                              ioc->reply_sz, sz / 1024));
5497         dinitprintk(ioc,
5498                     ioc_info(ioc, "reply_dma(0x%llx)\n",
5499                              (unsigned long long)ioc->reply_dma));
5500         total_sz += sz;
5501
5502         /* reply free queue, 16 byte align */
5503         sz = ioc->reply_free_queue_depth * 4;
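        /* each 4-byte entry holds the lower 32 bits of a reply frame
         * address; the upper 32 bits go to FW as SystemReplyAddressHigh */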
5504         ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
5505             &ioc->pdev->dev, sz, 16, 0);
5506         if (!ioc->reply_free_dma_pool) {
5507                 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
5508                 goto out;
5509         }
5510         ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
5511             &ioc->reply_free_dma);
5512         if (!ioc->reply_free) {
5513                 ioc_err(ioc, "reply_free pool: dma_pool_zalloc failed\n");
5514                 goto out;
5515         }
5516         dinitprintk(ioc,
5517                     ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5518                              ioc->reply_free, ioc->reply_free_queue_depth,
5519                              4, sz / 1024));
5520         dinitprintk(ioc,
5521                     ioc_info(ioc, "reply_free_dma (0x%llx)\n",
5522                              (unsigned long long)ioc->reply_free_dma));
5523         total_sz += sz;
5524
5525         if (ioc->rdpq_array_enable) {
5526                 reply_post_free_array_sz = ioc->reply_queue_count *
5527                     sizeof(Mpi2IOCInitRDPQArrayEntry);
5528                 ioc->reply_post_free_array_dma_pool =
5529                     dma_pool_create("reply_post_free_array pool",
5530                     &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5531                 if (!ioc->reply_post_free_array_dma_pool) {
5532                         dinitprintk(ioc,
5533                                     ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
5534                         goto out;
5535                 }
5536                 ioc->reply_post_free_array =
5537                     dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5538                     GFP_KERNEL, &ioc->reply_post_free_array_dma);
5539                 if (!ioc->reply_post_free_array) {
5540                         dinitprintk(ioc,
5541                                     ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
5542                         goto out;
5543                 }
5544         }
5545         ioc->config_page_sz = 512;
5546         ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
5547                         ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
5548         if (!ioc->config_page) {
5549                 ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
5550                 goto out;
5551         }
5552
5553         ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
5554             ioc->config_page, (unsigned long long)ioc->config_page_dma,
5555             ioc->config_page_sz);
5556         total_sz += ioc->config_page_sz;
5557
5558         ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
5559                  total_sz / 1024);
5560         ioc_info(ioc, "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
5561                  ioc->shost->can_queue, facts->RequestCredit);
5562         ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
5563                  ioc->shost->sg_tablesize);
5564         return 0;
5565
5566  out:
5567         return -ENOMEM;
5568 }
5569
5570 /**
5571  * mpt3sas_base_get_iocstate - Get the current state of an MPT adapter.
5572  * @ioc: Pointer to MPT3SAS_ADAPTER structure
5573  * @cooked: Request raw or cooked IOC state
5574  *
5575  * Return: all IOC Doorbell register bits if cooked==0, else just the
5576  * Doorbell bits in MPI2_IOC_STATE_MASK.
5577  */
5578 u32
5579 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
5580 {
5581         u32 s, sc;
5582
5583         s = ioc->base_readl(&ioc->chip->Doorbell);
5584         sc = s & MPI2_IOC_STATE_MASK;
5585         return cooked ? sc : s;
5586 }
5587
5588 /**
5589  * _base_wait_on_iocstate - waiting on a particular ioc state
5590  * @ioc: per adapter object
5591  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
5592  * @timeout: timeout in seconds
5593  *
5594  * Return: 0 for success, non-zero for failure.
5595  */
5596 static int
5597 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5598 {
5599         u32 count, cntdn;
5600         u32 current_state;
5601
5602         count = 0;
5603         cntdn = 1000 * timeout;
5604         do {
5605                 current_state = mpt3sas_base_get_iocstate(ioc, 1);
5606                 if (current_state == ioc_state)
5607                         return 0;
5608                 if (count && current_state == MPI2_IOC_STATE_FAULT)
5609                         break;
5610                 if (count && current_state == MPI2_IOC_STATE_COREDUMP)
5611                         break;
5612
5613                 usleep_range(1000, 1500);
5614                 count++;
5615         } while (--cntdn);
5616
5617         return current_state;
5618 }
5619
5620 /**
5621  * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
5622  * by a write to the doorbell)
5623  * @ioc: per adapter object
5624  * @timeout: timeout in seconds
5625  * Return: 0 for success, non-zero for failure.
5626  *
5627  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5628  */
5630 static int
5631 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5632 {
5633         u32 cntdn, count;
5634         u32 int_status;
5635
5636         count = 0;
5637         cntdn = 1000 * timeout;
5638         do {
5639                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5640                 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5641                         dhsprintk(ioc,
5642                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5643                                            __func__, count, timeout));
5644                         return 0;
5645                 }
5646
5647                 usleep_range(1000, 1500);
5648                 count++;
5649         } while (--cntdn);
5650
5651         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5652                 __func__, count, int_status);
5653         return -EFAULT;
5654 }
5655
5656 static int
5657 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5658 {
5659         u32 cntdn, count;
5660         u32 int_status;
5661
5662         count = 0;
5663         cntdn = 2000 * timeout;
5664         do {
5665                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5666                 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5667                         dhsprintk(ioc,
5668                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5669                                            __func__, count, timeout));
5670                         return 0;
5671                 }
5672
5673                 udelay(500);
5674                 count++;
5675         } while (--cntdn);
5676
5677         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5678                 __func__, count, int_status);
5679         return -EFAULT;
5680
5681 }
5682
5683 /**
5684  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
5685  * @ioc: per adapter object
5686  * @timeout: timeout in seconds
5687  *
5688  * Return: 0 for success, non-zero for failure.
5689  *
5690  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
5691  * doorbell.
5692  */
5693 static int
5694 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
5695 {
5696         u32 cntdn, count;
5697         u32 int_status;
5698         u32 doorbell;
5699
5700         count = 0;
5701         cntdn = 1000 * timeout;
5702         do {
5703                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5704                 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5705                         dhsprintk(ioc,
5706                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5707                                            __func__, count, timeout));
5708                         return 0;
5709                 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5710                         doorbell = ioc->base_readl(&ioc->chip->Doorbell);
5711                         if ((doorbell & MPI2_IOC_STATE_MASK) ==
5712                             MPI2_IOC_STATE_FAULT) {
5713                                 mpt3sas_print_fault_code(ioc, doorbell);
5714                                 return -EFAULT;
5715                         }
5716                         if ((doorbell & MPI2_IOC_STATE_MASK) ==
5717                             MPI2_IOC_STATE_COREDUMP) {
5718                                 mpt3sas_print_coredump_info(ioc, doorbell);
5719                                 return -EFAULT;
5720                         }
5721                 } else if (int_status == 0xFFFFFFFF)
5722                         goto out;
5723
5724                 usleep_range(1000, 1500);
5725                 count++;
5726         } while (--cntdn);
5727
5728  out:
5729         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5730                 __func__, count, int_status);
5731         return -EFAULT;
5732 }
5733
5734 /**
5735  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5736  * @ioc: per adapter object
5737  * @timeout: timeout in seconds
5738  *
5739  * Return: 0 for success, non-zero for failure.
5740  */
5741 static int
5742 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5743 {
5744         u32 cntdn, count;
5745         u32 doorbell_reg;
5746
5747         count = 0;
5748         cntdn = 1000 * timeout;
5749         do {
5750                 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5751                 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5752                         dhsprintk(ioc,
5753                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5754                                            __func__, count, timeout));
5755                         return 0;
5756                 }
5757
5758                 usleep_range(1000, 1500);
5759                 count++;
5760         } while (--cntdn);
5761
5762         ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5763                 __func__, count, doorbell_reg);
5764         return -EFAULT;
5765 }
5766
5767 /**
5768  * _base_send_ioc_reset - send doorbell reset
5769  * @ioc: per adapter object
5770  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
5771  * @timeout: timeout in seconds
5772  *
5773  * Return: 0 for success, non-zero for failure.
5774  */
5775 static int
5776 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5777 {
5778         u32 ioc_state;
5779         int r = 0;
5780         unsigned long flags;
5781
5782         if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5783                 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
5784                 return -EFAULT;
5785         }
5786
5787         if (!(ioc->facts.IOCCapabilities &
5788            MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5789                 return -EFAULT;
5790
5791         ioc_info(ioc, "sending message unit reset !!\n");
5792
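        /* request the reset by writing the function code into the Doorbell
         * Function field, then wait for the IOC to ack and go READY
         */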
5793         writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5794             &ioc->chip->Doorbell);
5795         if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5796                 r = -EFAULT;
5797                 goto out;
5798         }
5799
5800         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5801         if (ioc_state) {
5802                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5803                         __func__, ioc_state);
5804                 r = -EFAULT;
5805                 goto out;
5806         }
5807  out:
5808         if (r != 0) {
5809                 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5810                 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5811                 /*
5812                  * Wait for IOC state CoreDump to clear only during
5813                  * HBA initialization & release time.
5814                  */
5815                 if ((ioc_state & MPI2_IOC_STATE_MASK) ==
5816                     MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
5817                     ioc->fault_reset_work_q == NULL)) {
5818                         spin_unlock_irqrestore(
5819                             &ioc->ioc_reset_in_progress_lock, flags);
5820                         mpt3sas_print_coredump_info(ioc, ioc_state);
5821                         mpt3sas_base_wait_for_coredump_completion(ioc,
5822                             __func__);
5823                         spin_lock_irqsave(
5824                             &ioc->ioc_reset_in_progress_lock, flags);
5825                 }
5826                 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5827         }
5828         ioc_info(ioc, "message unit reset: %s\n",
5829                  r == 0 ? "SUCCESS" : "FAILED");
5830         return r;
5831 }
5832
5833 /**
5834  * mpt3sas_wait_for_ioc - wait for the IOC to become operational
5835  * @ioc: per adapter object
5836  * @timeout: timeout in seconds
5837  *
5838  * Waits up to timeout seconds for the IOC to become operational.
5839  *
5840  * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
5841  */
5843 int
5844 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5845 {
5846         int wait_state_count = 0;
5847         u32 ioc_state;
5848
5849         do {
5850                 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5851                 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5852                         break;
5853                 ssleep(1);
5854                 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5855                                 __func__, ++wait_state_count);
5856         } while (--timeout);
5857         if (!timeout) {
5858                 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5859                 return -EFAULT;
5860         }
5861         if (wait_state_count)
5862                 ioc_info(ioc, "ioc is operational\n");
5863         return 0;
5864 }
5865
5866 /**
5867  * _base_handshake_req_reply_wait - send request thru doorbell interface
5868  * @ioc: per adapter object
5869  * @request_bytes: request length
5870  * @request: pointer having request payload
5871  * @reply_bytes: reply length
5872  * @reply: pointer to reply payload
5873  * @timeout: timeout in seconds
5874  *
5875  * Return: 0 for success, non-zero for failure.
5876  */
5877 static int
5878 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5879         u32 *request, int reply_bytes, u16 *reply, int timeout)
5880 {
5881         MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5882         int i;
5883         u8 failed;
5884         __le32 *mfp;
5885
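        /*
         * Handshake sequence, as implemented below:
         *   1. write the function code and dword count to the Doorbell
         *   2. spin for the IOC2SYS doorbell interrupt, then clear it
         *   3. wait for the IOC to ack the handshake (SYS2IOC status clears)
         *   4. write the request payload 32 bits at a time, waiting for an
         *      ack after each dword
         *   5. read the reply back 16 bits at a time through the Doorbell,
         *      clearing the interrupt after each word
         */
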
5886         /* make sure doorbell is not in use */
5887         if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5888                 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5889                 return -EFAULT;
5890         }
5891
5892         /* clear pending doorbell interrupts from previous state changes */
5893         if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5894             MPI2_HIS_IOC2SYS_DB_STATUS)
5895                 writel(0, &ioc->chip->HostInterruptStatus);
5896
5897         /* send message to ioc */
5898         writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5899             ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5900             &ioc->chip->Doorbell);
5901
5902         if ((_base_spin_on_doorbell_int(ioc, 5))) {
5903                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5904                         __LINE__);
5905                 return -EFAULT;
5906         }
5907         writel(0, &ioc->chip->HostInterruptStatus);
5908
5909         if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5910                 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5911                         __LINE__);
5912                 return -EFAULT;
5913         }
5914
5915         /* send message 32-bits at a time */
5916         for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5917                 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
5918                 if ((_base_wait_for_doorbell_ack(ioc, 5)))
5919                         failed = 1;
5920         }
5921
5922         if (failed) {
5923                 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5924                         __LINE__);
5925                 return -EFAULT;
5926         }
5927
5928         /* now wait for the reply */
5929         if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5930                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5931                         __LINE__);
5932                 return -EFAULT;
5933         }
5934
5935         /* read the first two 16-bit words; MsgLength (in dwords) gives the total reply length */
5936         reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5937             & MPI2_DOORBELL_DATA_MASK);
5938         writel(0, &ioc->chip->HostInterruptStatus);
5939         if ((_base_wait_for_doorbell_int(ioc, 5))) {
5940                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5941                         __LINE__);
5942                 return -EFAULT;
5943         }
5944         reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5945             & MPI2_DOORBELL_DATA_MASK);
5946         writel(0, &ioc->chip->HostInterruptStatus);
5947
5948         for (i = 2; i < default_reply->MsgLength * 2; i++)  {
5949                 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5950                         ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5951                                 __LINE__);
5952                         return -EFAULT;
5953                 }
5954                 if (i >=  reply_bytes/2) /* overflow case */
5955                         ioc->base_readl(&ioc->chip->Doorbell);
5956                 else
5957                         reply[i] = le16_to_cpu(
5958                             ioc->base_readl(&ioc->chip->Doorbell)
5959                             & MPI2_DOORBELL_DATA_MASK);
5960                 writel(0, &ioc->chip->HostInterruptStatus);
5961         }
5962
5963         _base_wait_for_doorbell_int(ioc, 5);
5964         if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5965                 dhsprintk(ioc,
5966                           ioc_info(ioc, "doorbell is in use (line=%d)\n",
5967                                    __LINE__));
5968         }
5969         writel(0, &ioc->chip->HostInterruptStatus);
5970
5971         if (ioc->logging_level & MPT_DEBUG_INIT) {
5972                 mfp = (__le32 *)reply;
5973                 pr_info("\toffset:data\n");
5974                 for (i = 0; i < reply_bytes/4; i++)
5975                         ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
5976                             le32_to_cpu(mfp[i]));
5977         }
5978         return 0;
5979 }
5980
5981 /**
5982  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
5983  * @ioc: per adapter object
5984  * @mpi_reply: the reply payload from FW
5985  * @mpi_request: the request payload sent to FW
5986  *
5987  * The SAS IO Unit Control Request message allows the host to perform
5988  * low-level operations such as resets on the PHYs of the IO Unit. It also
5989  * allows the host to obtain the IOC-assigned device handle for a device,
5990  * given other identifying information about it, and to remove IOC
5991  * resources associated with the device.
5992  *
5993  * Return: 0 for success, non-zero for failure.
5994  */
5995 int
5996 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5997         Mpi2SasIoUnitControlReply_t *mpi_reply,
5998         Mpi2SasIoUnitControlRequest_t *mpi_request)
5999 {
6000         u16 smid;
6001         u8 issue_reset = 0;
6002         int rc;
6003         void *request;
6004
6005         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6006
6007         mutex_lock(&ioc->base_cmds.mutex);
6008
6009         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6010                 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6011                 rc = -EAGAIN;
6012                 goto out;
6013         }
6014
6015         rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6016         if (rc)
6017                 goto out;
6018
6019         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6020         if (!smid) {
6021                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6022                 rc = -EAGAIN;
6023                 goto out;
6024         }
6025
6026         rc = 0;
6027         ioc->base_cmds.status = MPT3_CMD_PENDING;
6028         request = mpt3sas_base_get_msg_frame(ioc, smid);
6029         ioc->base_cmds.smid = smid;
6030         memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
6031         if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6032             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
6033                 ioc->ioc_link_reset_in_progress = 1;
6034         init_completion(&ioc->base_cmds.done);
6035         ioc->put_smid_default(ioc, smid);
6036         wait_for_completion_timeout(&ioc->base_cmds.done,
6037             msecs_to_jiffies(10000));
6038         if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6039             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
6040             ioc->ioc_link_reset_in_progress)
6041                 ioc->ioc_link_reset_in_progress = 0;
6042         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6043                 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
6044                     mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
6045                     issue_reset);
6046                 goto issue_host_reset;
6047         }
6048         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6049                 memcpy(mpi_reply, ioc->base_cmds.reply,
6050                     sizeof(Mpi2SasIoUnitControlReply_t));
6051         else
6052                 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
6053         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6054         goto out;
6055
6056  issue_host_reset:
6057         if (issue_reset)
6058                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6059         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6060         rc = -EFAULT;
6061  out:
6062         mutex_unlock(&ioc->base_cmds.mutex);
6063         return rc;
6064 }
6065
6066 /**
6067  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
6068  * @ioc: per adapter object
6069  * @mpi_reply: the reply payload from FW
6070  * @mpi_request: the request payload sent to FW
6071  *
6072  * The SCSI Enclosure Processor request message causes the IOC to
6073  * communicate with SES devices to control LED status signals.
6074  *
6075  * Return: 0 for success, non-zero for failure.
6076  */
6077 int
6078 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
6079         Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
6080 {
6081         u16 smid;
6082         u8 issue_reset = 0;
6083         int rc;
6084         void *request;
6085
6086         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6087
6088         mutex_lock(&ioc->base_cmds.mutex);
6089
6090         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6091                 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6092                 rc = -EAGAIN;
6093                 goto out;
6094         }
6095
6096         rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6097         if (rc)
6098                 goto out;
6099
6100         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6101         if (!smid) {
6102                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6103                 rc = -EAGAIN;
6104                 goto out;
6105         }
6106
6107         rc = 0;
6108         ioc->base_cmds.status = MPT3_CMD_PENDING;
6109         request = mpt3sas_base_get_msg_frame(ioc, smid);
6110         ioc->base_cmds.smid = smid;
6111         memset(request, 0, ioc->request_sz);
6112         memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
6113         init_completion(&ioc->base_cmds.done);
6114         ioc->put_smid_default(ioc, smid);
6115         wait_for_completion_timeout(&ioc->base_cmds.done,
6116             msecs_to_jiffies(10000));
6117         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6118                 mpt3sas_check_cmd_timeout(ioc,
6119                     ioc->base_cmds.status, mpi_request,
6120                     sizeof(Mpi2SepRequest_t)/4, issue_reset);
6121                 goto issue_host_reset;
6122         }
6123         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6124                 memcpy(mpi_reply, ioc->base_cmds.reply,
6125                     sizeof(Mpi2SepReply_t));
6126         else
6127                 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
6128         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6129         goto out;
6130
6131  issue_host_reset:
6132         if (issue_reset)
6133                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6134         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6135         rc = -EFAULT;
6136  out:
6137         mutex_unlock(&ioc->base_cmds.mutex);
6138         return rc;
6139 }
6140
6141 /**
6142  * _base_get_port_facts - obtain port facts reply and save in ioc
6143  * @ioc: per adapter object
6144  * @port: port number
6145  *
6146  * Return: 0 for success, non-zero for failure.
6147  */
6148 static int
6149 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
6150 {
6151         Mpi2PortFactsRequest_t mpi_request;
6152         Mpi2PortFactsReply_t mpi_reply;
6153         struct mpt3sas_port_facts *pfacts;
6154         int mpi_reply_sz, mpi_request_sz, r;
6155
6156         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6157
6158         mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
6159         mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
6160         memset(&mpi_request, 0, mpi_request_sz);
6161         mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
6162         mpi_request.PortNumber = port;
6163         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6164             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6165
6166         if (r != 0) {
6167                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6168                 return r;
6169         }
6170
6171         pfacts = &ioc->pfacts[port];
6172         memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
6173         pfacts->PortNumber = mpi_reply.PortNumber;
6174         pfacts->VP_ID = mpi_reply.VP_ID;
6175         pfacts->VF_ID = mpi_reply.VF_ID;
6176         pfacts->MaxPostedCmdBuffers =
6177             le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
6178
6179         return 0;
6180 }
6181
6182 /**
6183  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
6184  * @ioc: per adapter object
6185  * @timeout: timeout in seconds
6186  *
6187  * Return: 0 for success, non-zero for failure.
6188  */
6189 static int
6190 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
6191 {
6192         u32 ioc_state;
6193         int rc;
6194
6195         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6196
6197         if (ioc->pci_error_recovery) {
6198                 dfailprintk(ioc,
6199                             ioc_info(ioc, "%s: host in pci error recovery\n",
6200                                      __func__));
6201                 return -EFAULT;
6202         }
6203
6204         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6205         dhsprintk(ioc,
6206                   ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6207                            __func__, ioc_state));
6208
6209         if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
6210             (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6211                 return 0;
6212
6213         if (ioc_state & MPI2_DOORBELL_USED) {
6214                 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6215                 goto issue_diag_reset;
6216         }
6217
6218         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6219                 mpt3sas_print_fault_code(ioc, ioc_state &
6220                     MPI2_DOORBELL_DATA_MASK);
6221                 goto issue_diag_reset;
6222         } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6223             MPI2_IOC_STATE_COREDUMP) {
6224                 ioc_info(ioc,
6225                     "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
6226                     __func__, ioc_state);
6227                 return -EFAULT;
6228         }
6229
6230         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6231         if (ioc_state) {
6232                 dfailprintk(ioc,
6233                             ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6234                                      __func__, ioc_state));
6235                 return -EFAULT;
6236         }
6237
6238  issue_diag_reset:
6239         rc = _base_diag_reset(ioc);
6240         return rc;
6241 }
6242
6243 /**
6244  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
6245  * @ioc: per adapter object
6246  *
6247  * Return: 0 for success, non-zero for failure.
6248  */
6249 static int
6250 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
6251 {
6252         Mpi2IOCFactsRequest_t mpi_request;
6253         Mpi2IOCFactsReply_t mpi_reply;
6254         struct mpt3sas_facts *facts;
6255         int mpi_reply_sz, mpi_request_sz, r;
6256
6257         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6258
6259         r = _base_wait_for_iocstate(ioc, 10);
6260         if (r) {
6261                 dfailprintk(ioc,
6262                             ioc_info(ioc, "%s: failed getting to correct state\n",
6263                                      __func__));
6264                 return r;
6265         }
6266         mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
6267         mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
6268         memset(&mpi_request, 0, mpi_request_sz);
6269         mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
6270         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6271             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6272
6273         if (r != 0) {
6274                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6275                 return r;
6276         }
6277
6278         facts = &ioc->facts;
6279         memset(facts, 0, sizeof(struct mpt3sas_facts));
6280         facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
6281         facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
6282         facts->VP_ID = mpi_reply.VP_ID;
6283         facts->VF_ID = mpi_reply.VF_ID;
6284         facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
6285         facts->MaxChainDepth = mpi_reply.MaxChainDepth;
6286         facts->WhoInit = mpi_reply.WhoInit;
6287         facts->NumberOfPorts = mpi_reply.NumberOfPorts;
6288         facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
6289         if (ioc->msix_enable && (facts->MaxMSIxVectors <=
6290             MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
6291                 ioc->combined_reply_queue = 0;
6292         facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
6293         facts->MaxReplyDescriptorPostQueueDepth =
6294             le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
6295         facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
6296         facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
6297         if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
6298                 ioc->ir_firmware = 1;
6299         if ((facts->IOCCapabilities &
6300               MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
6301                 ioc->rdpq_array_capable = 1;
6302         if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
6303             && ioc->is_aero_ioc)
6304                 ioc->atomic_desc_capable = 1;
6305         facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
6306         facts->IOCRequestFrameSize =
6307             le16_to_cpu(mpi_reply.IOCRequestFrameSize);
6308         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6309                 facts->IOCMaxChainSegmentSize =
6310                         le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
6311         }
6312         facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
6313         facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
6314         ioc->shost->max_id = -1;
6315         facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
6316         facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
6317         facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
6318         facts->HighPriorityCredit =
6319             le16_to_cpu(mpi_reply.HighPriorityCredit);
6320         facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
6321         facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
6322         facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
6323
6324         /*
6325          * Get the Page Size from IOC Facts. If it's 0, default to 4k.
6326          */
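        /* CurrentHostPageSize is log2 of the page size, e.g. 12 -> 4096 */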
6327         ioc->page_size = 1 << facts->CurrentHostPageSize;
6328         if (ioc->page_size == 1) {
6329                 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
6330                 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
6331         }
6332         dinitprintk(ioc,
6333                     ioc_info(ioc, "CurrentHostPageSize(%d)\n",
6334                              facts->CurrentHostPageSize));
6335
6336         dinitprintk(ioc,
6337                     ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
6338                              facts->RequestCredit, facts->MaxChainDepth));
6339         dinitprintk(ioc,
6340                     ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
6341                              facts->IOCRequestFrameSize * 4,
6342                              facts->ReplyFrameSize * 4));
6343         return 0;
6344 }
6345
6346 /**
6347  * _base_send_ioc_init - send ioc_init to firmware
6348  * @ioc: per adapter object
6349  *
6350  * Return: 0 for success, non-zero for failure.
6351  */
6352 static int
6353 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
6354 {
6355         Mpi2IOCInitRequest_t mpi_request;
6356         Mpi2IOCInitReply_t mpi_reply;
6357         int i, r = 0;
6358         ktime_t current_time;
6359         u16 ioc_status;
6360         u32 reply_post_free_array_sz = 0;
6361
6362         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6363
6364         memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
6365         mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
6366         mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
6367         mpi_request.VF_ID = 0; /* TODO */
6368         mpi_request.VP_ID = 0;
6369         mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
6370         mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
6371         mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
6372
6373         if (_base_is_controller_msix_enabled(ioc))
6374                 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
6375         mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
6376         mpi_request.ReplyDescriptorPostQueueDepth =
6377             cpu_to_le16(ioc->reply_post_queue_depth);
6378         mpi_request.ReplyFreeQueueDepth =
6379             cpu_to_le16(ioc->reply_free_queue_depth);
6380
6381         mpi_request.SenseBufferAddressHigh =
6382             cpu_to_le32((u64)ioc->sense_dma >> 32);
6383         mpi_request.SystemReplyAddressHigh =
6384             cpu_to_le32((u64)ioc->reply_dma >> 32);
6385         mpi_request.SystemRequestFrameBaseAddress =
6386             cpu_to_le64((u64)ioc->request_dma);
6387         mpi_request.ReplyFreeQueueAddress =
6388             cpu_to_le64((u64)ioc->reply_free_dma);
6389
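        /*
         * In RDPQ mode the IOC is handed an array of per-queue base
         * addresses rather than one contiguous reply descriptor post queue.
         */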
6390         if (ioc->rdpq_array_enable) {
6391                 reply_post_free_array_sz = ioc->reply_queue_count *
6392                     sizeof(Mpi2IOCInitRDPQArrayEntry);
6393                 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
6394                 for (i = 0; i < ioc->reply_queue_count; i++)
6395                         ioc->reply_post_free_array[i].RDPQBaseAddress =
6396                             cpu_to_le64(
6397                                 (u64)ioc->reply_post[i].reply_post_free_dma);
6398                 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
6399                 mpi_request.ReplyDescriptorPostQueueAddress =
6400                     cpu_to_le64((u64)ioc->reply_post_free_array_dma);
6401         } else {
6402                 mpi_request.ReplyDescriptorPostQueueAddress =
6403                     cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
6404         }
6405
6406         /*
6407          * Set the flag to enable CoreDump state feature in IOC firmware.
6408          */
6409         mpi_request.ConfigurationFlags |=
6410             cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
6411
6412         /* This time stamp specifies number of milliseconds
6413          * since epoch ~ midnight January 1, 1970.
6414          */
6415         current_time = ktime_get_real();
6416         mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
6417
6418         if (ioc->logging_level & MPT_DEBUG_INIT) {
6419                 __le32 *mfp;
6420                 int i;
6421
6422                 mfp = (__le32 *)&mpi_request;
6423                 ioc_info(ioc, "\toffset:data\n");
6424                 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
6425                         ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6426                             le32_to_cpu(mfp[i]));
6427         }
6428
6429         r = _base_handshake_req_reply_wait(ioc,
6430             sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
6431             sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
6432
6433         if (r != 0) {
6434                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6435                 return r;
6436         }
6437
6438         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6439         if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
6440             mpi_reply.IOCLogInfo) {
6441                 ioc_err(ioc, "%s: failed\n", __func__);
6442                 r = -EIO;
6443         }
6444
6445         return r;
6446 }
6447
6448 /**
6449  * mpt3sas_port_enable_done - command completion routine for port enable
6450  * @ioc: per adapter object
6451  * @smid: system request message index
6452  * @msix_index: MSIX table index supplied by the OS
6453  * @reply: reply message frame(lower 32bit addr)
6454  *
6455  * Return: 1 meaning mf should be freed from _base_interrupt
6456  *          0 means the mf is freed from this function.
6457  */
6458 u8
6459 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
6460         u32 reply)
6461 {
6462         MPI2DefaultReply_t *mpi_reply;
6463         u16 ioc_status;
6464
6465         if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
6466                 return 1;
6467
6468         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
6469         if (!mpi_reply)
6470                 return 1;
6471
6472         if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
6473                 return 1;
6474
6475         ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
6476         ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
6477         ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
6478         memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
6479         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
6480         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6481                 ioc->port_enable_failed = 1;
6482
6483         if (ioc->is_driver_loading) {
6484                 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
6485                         mpt3sas_port_enable_complete(ioc);
6486                         return 1;
6487                 } else {
6488                         ioc->start_scan_failed = ioc_status;
6489                         ioc->start_scan = 0;
6490                         return 1;
6491                 }
6492         }
6493         complete(&ioc->port_enable_cmds.done);
6494         return 1;
6495 }
6496
6497 /**
6498  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
6499  * @ioc: per adapter object
6500  *
6501  * Return: 0 for success, non-zero for failure.
6502  */
6503 static int
6504 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
6505 {
6506         Mpi2PortEnableRequest_t *mpi_request;
6507         Mpi2PortEnableReply_t *mpi_reply;
6508         int r = 0;
6509         u16 smid;
6510         u16 ioc_status;
6511
6512         ioc_info(ioc, "sending port enable !!\n");
6513
6514         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6515                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6516                 return -EAGAIN;
6517         }
6518
6519         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
6520         if (!smid) {
6521                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6522                 return -EAGAIN;
6523         }
6524
6525         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
6526         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6527         ioc->port_enable_cmds.smid = smid;
6528         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
6529         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
6530
6531         init_completion(&ioc->port_enable_cmds.done);
6532         ioc->put_smid_default(ioc, smid);
6533         wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
6534         if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
6535                 ioc_err(ioc, "%s: timeout\n", __func__);
6536                 _debug_dump_mf(mpi_request,
6537                     sizeof(Mpi2PortEnableRequest_t)/4);
6538                 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
6539                         r = -EFAULT;
6540                 else
6541                         r = -ETIME;
6542                 goto out;
6543         }
6544
6545         mpi_reply = ioc->port_enable_cmds.reply;
6546         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
6547         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6548                 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
6549                         __func__, ioc_status);
6550                 r = -EFAULT;
6551                 goto out;
6552         }
6553
6554  out:
6555         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6556         ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
6557         return r;
6558 }
6559
6560 /**
6561  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
6562  * @ioc: per adapter object
6563  *
6564  * Return: 0 for success, non-zero for failure.
6565  */
6566 int
6567 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
6568 {
6569         Mpi2PortEnableRequest_t *mpi_request;
6570         u16 smid;
6571
6572         ioc_info(ioc, "sending port enable !!\n");
6573
6574         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6575                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6576                 return -EAGAIN;
6577         }
6578
6579         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
6580         if (!smid) {
6581                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6582                 return -EAGAIN;
6583         }
6584
6585         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
6586         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6587         ioc->port_enable_cmds.smid = smid;
6588         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
6589         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
6590
6591         ioc->put_smid_default(ioc, smid);
6592         return 0;
6593 }
6594
6595 /**
6596  * _base_determine_wait_on_discovery - discovery disposition
6597  * @ioc: per adapter object
6598  *
6599  * Decide whether to wait on discovery to complete. Used to either
6600  * locate boot device, or report volumes ahead of physical devices.
6601  *
6602  * Return: 1 for wait, 0 for don't wait.
6603  */
6604 static int
6605 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
6606 {
6607         /* We wait for discovery to complete if IR firmware is loaded.
6608          * The sas topology events arrive before PD events, so we need time
6609          * to turn on the bit in ioc->pd_handles that marks a PD.
6610          * Also, it may be required to report Volumes ahead of physical
6611          * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
6612          */
6613         if (ioc->ir_firmware)
6614                 return 1;
6615
6616         /* if no Bios, then we don't need to wait */
6617         if (!ioc->bios_pg3.BiosVersion)
6618                 return 0;
6619
6620         /* Bios is present, so we drop down here.
6621          *
6622          * If there are any entries in Bios Page 2, then we wait
6623          * for discovery to complete.
6624          */
6625
6626         /* Current Boot Device */
6627         if ((ioc->bios_pg2.CurrentBootDeviceForm &
6628             MPI2_BIOSPAGE2_FORM_MASK) ==
6629             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6630         /* Request Boot Device */
6631            (ioc->bios_pg2.ReqBootDeviceForm &
6632             MPI2_BIOSPAGE2_FORM_MASK) ==
6633             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6634         /* Alternate Request Boot Device */
6635            (ioc->bios_pg2.ReqAltBootDeviceForm &
6636             MPI2_BIOSPAGE2_FORM_MASK) ==
6637             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
6638                 return 0;
6639
6640         return 1;
6641 }
6642
6643 /**
6644  * _base_unmask_events - turn on notification for this event
6645  * @ioc: per adapter object
6646  * @event: firmware event
6647  *
6648  * The mask is stored in ioc->event_masks.
6649  */
6650 static void
6651 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
6652 {
6653         u32 desired_event;
6654
6655         if (event >= 128)
6656                 return;
6657
6658         desired_event = (1 << (event % 32));
6659
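        /* e.g. event 70: word 2 (events 64..95), bit 70 % 32 = 6 */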
6660         if (event < 32)
6661                 ioc->event_masks[0] &= ~desired_event;
6662         else if (event < 64)
6663                 ioc->event_masks[1] &= ~desired_event;
6664         else if (event < 96)
6665                 ioc->event_masks[2] &= ~desired_event;
6666         else if (event < 128)
6667                 ioc->event_masks[3] &= ~desired_event;
6668 }
6669
6670 /**
6671  * _base_event_notification - send event notification
6672  * @ioc: per adapter object
6673  *
6674  * Return: 0 for success, non-zero for failure.
6675  */
6676 static int
6677 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
6678 {
6679         Mpi2EventNotificationRequest_t *mpi_request;
6680         u16 smid;
6681         int r = 0;
6682         int i;
6683
6684         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6685
6686         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6687                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6688                 return -EAGAIN;
6689         }
6690
6691         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6692         if (!smid) {
6693                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6694                 return -EAGAIN;
6695         }
6696         ioc->base_cmds.status = MPT3_CMD_PENDING;
6697         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6698         ioc->base_cmds.smid = smid;
6699         memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
6700         mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
6701         mpi_request->VF_ID = 0; /* TODO */
6702         mpi_request->VP_ID = 0;
6703         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6704                 mpi_request->EventMasks[i] =
6705                     cpu_to_le32(ioc->event_masks[i]);
6706         init_completion(&ioc->base_cmds.done);
6707         ioc->put_smid_default(ioc, smid);
6708         wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
6709         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6710                 ioc_err(ioc, "%s: timeout\n", __func__);
6711                 _debug_dump_mf(mpi_request,
6712                     sizeof(Mpi2EventNotificationRequest_t)/4);
6713                 if (ioc->base_cmds.status & MPT3_CMD_RESET)
6714                         r = -EFAULT;
6715                 else
6716                         r = -ETIME;
6717         } else
6718                 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
6719         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6720         return r;
6721 }
6722
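/*
 * The function above follows the driver's internal-command pattern; a
 * minimal sketch of that pattern (same helpers as used above):
 *
 *      smid = mpt3sas_base_get_smid(ioc, cb_idx);      // reserve a frame
 *      ioc->base_cmds.status = MPT3_CMD_PENDING;
 *      ... build the request in mpt3sas_base_get_msg_frame(ioc, smid) ...
 *      init_completion(&ioc->base_cmds.done);
 *      ioc->put_smid_default(ioc, smid);               // post to firmware
 *      wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
 *
 * MPT3_CMD_COMPLETE is set by the command's completion callback, while
 * MPT3_CMD_RESET is set by _base_clear_outstanding_mpt_commands() when
 * a host reset flushes the command; the waiter checks both on wake-up.
 */
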
6723 /**
 * mpt3sas_base_validate_event_type - validate and enable event types
 * @ioc: per adapter object
 * @event_type: firmware event type words; a set bit requests that event
 *
 * This will turn on firmware event notification when the application
 * asks for that event. We don't mask events that are already enabled.
6730  */
6731 void
6732 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
6733 {
6734         int i, j;
6735         u32 event_mask, desired_event;
6736         u8 send_update_to_fw;
6737
6738         for (i = 0, send_update_to_fw = 0; i <
6739             MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
6740                 event_mask = ~event_type[i];
6741                 desired_event = 1;
6742                 for (j = 0; j < 32; j++) {
6743                         if (!(event_mask & desired_event) &&
6744                             (ioc->event_masks[i] & desired_event)) {
6745                                 ioc->event_masks[i] &= ~desired_event;
6746                                 send_update_to_fw = 1;
6747                         }
6748                         desired_event = (desired_event << 1);
6749                 }
6750         }
6751
6752         if (!send_update_to_fw)
6753                 return;
6754
6755         mutex_lock(&ioc->base_cmds.mutex);
6756         _base_event_notification(ioc);
6757         mutex_unlock(&ioc->base_cmds.mutex);
6758 }
6759
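/*
 * Worked example (illustrative): an application requesting an event
 * passes event_type[] with that event's bit set.  event_mask, being
 * ~event_type[i], then has the bit cleared, so if ioc->event_masks[i]
 * still has it set (event currently masked) the loop above clears it
 * and _base_event_notification() pushes the updated masks to firmware.
 */
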
6760 /**
6761  * _base_diag_reset - the "big hammer" start of day reset
6762  * @ioc: per adapter object
6763  *
6764  * Return: 0 for success, non-zero for failure.
6765  */
6766 static int
6767 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6768 {
6769         u32 host_diagnostic;
6770         u32 ioc_state;
6771         u32 count;
6772         u32 hcb_size;
6773
6774         ioc_info(ioc, "sending diag reset !!\n");
6775
6776         drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
6777
6778         count = 0;
6779         do {
6780                 /* Write magic sequence to WriteSequence register
6781                  * Loop until in diagnostic mode
6782                  */
6783                 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
6784                 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6785                 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
6786                 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
6787                 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
6788                 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
6789                 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
6790                 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
6791
6792                 /* wait 100 msec */
6793                 msleep(100);
6794
6795                 if (count++ > 20) {
6796                         ioc_info(ioc,
6797                             "Stop writing magic sequence after 20 retries\n");
6798                         goto out;
6799                 }
6800
6801                 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6802                 drsprintk(ioc,
6803                           ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6804                                    count, host_diagnostic));
6805
6806         } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
6807
6808         hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
6809
6810         drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
6811         writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
6812              &ioc->chip->HostDiagnostic);
6813
        /* This delay allows the chip PCIe hardware time to finish reset tasks */
6815         msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
6816
6817         /* Approximately 300 second max wait */
6818         for (count = 0; count < (300000000 /
6819                 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
6820
6821                 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6822
6823                 if (host_diagnostic == 0xFFFFFFFF) {
6824                         ioc_info(ioc,
6825                             "Invalid host diagnostic register value\n");
6826                         goto out;
6827                 }
6828                 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
6829                         break;
6830
6831                 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
6832         }
6833
6834         if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
6835
6836                 drsprintk(ioc,
6837                           ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
6838                 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
6839                 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
6840                 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
6841
6842                 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
6843                 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
6844                     &ioc->chip->HCBSize);
6845         }
6846
6847         drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
6848         writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
6849             &ioc->chip->HostDiagnostic);
6850
6851         drsprintk(ioc,
6852                   ioc_info(ioc, "disable writes to the diagnostic register\n"));
6853         writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6854
6855         drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
6856         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
6857         if (ioc_state) {
6858                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6859                         __func__, ioc_state);
6860                 goto out;
6861         }
6862
6863         ioc_info(ioc, "diag reset: SUCCESS\n");
6864         return 0;
6865
6866  out:
6867         ioc_err(ioc, "diag reset: FAILED\n");
6868         return -EFAULT;
6869 }
6870
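/*
 * Summary of the diag reset handshake implemented above:
 *
 *      1. write the flush key and the six-key magic sequence to
 *         WriteSequence until HostDiagnostic reports DIAG_WRITE_ENABLE;
 *      2. set MPI2_DIAG_RESET_ADAPTER and poll (up to ~300 seconds)
 *         for the bit to self-clear;
 *      3. if the IOC came back in HCB mode, point the boot device
 *         select at the HCDW and re-enable the HCB;
 *      4. clear MPI2_DIAG_HOLD_IOC_RESET, write the flush key to
 *         re-lock the diagnostic register, then wait up to 20 seconds
 *         for the READY doorbell state.
 */
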
6871 /**
6872  * _base_make_ioc_ready - put controller in READY state
6873  * @ioc: per adapter object
6874  * @type: FORCE_BIG_HAMMER or SOFT_RESET
6875  *
6876  * Return: 0 for success, non-zero for failure.
6877  */
6878 static int
6879 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
6880 {
6881         u32 ioc_state;
6882         int rc;
6883         int count;
6884
6885         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6886
6887         if (ioc->pci_error_recovery)
6888                 return 0;
6889
6890         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6891         dhsprintk(ioc,
6892                   ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6893                            __func__, ioc_state));
6894
6895         /* if in RESET state, it should move to READY state shortly */
6896         count = 0;
6897         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
6898                 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
6899                     MPI2_IOC_STATE_READY) {
6900                         if (count++ == 10) {
6901                                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6902                                         __func__, ioc_state);
6903                                 return -EFAULT;
6904                         }
6905                         ssleep(1);
6906                         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6907                 }
6908         }
6909
6910         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
6911                 return 0;
6912
6913         if (ioc_state & MPI2_DOORBELL_USED) {
6914                 ioc_info(ioc, "unexpected doorbell active!\n");
6915                 goto issue_diag_reset;
6916         }
6917
6918         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6919                 mpt3sas_print_fault_code(ioc, ioc_state &
6920                     MPI2_DOORBELL_DATA_MASK);
6921                 goto issue_diag_reset;
6922         }
6923
6924         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
                /*
                 * If a host reset is invoked while the watchdog thread is
                 * waiting for the IOC state to change to Fault, the driver
                 * has to wait here for the CoreDump state to clear;
                 * otherwise a reset would be issued to the firmware and it
                 * would move the IOC to the Reset state without copying the
                 * firmware logs to the coredump region.
                 */
6932                 if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
6933                         mpt3sas_print_coredump_info(ioc, ioc_state &
6934                             MPI2_DOORBELL_DATA_MASK);
6935                         mpt3sas_base_wait_for_coredump_completion(ioc,
6936                             __func__);
6937                 }
6938                 goto issue_diag_reset;
6939         }
6940
6941         if (type == FORCE_BIG_HAMMER)
6942                 goto issue_diag_reset;
6943
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
                if (!(_base_send_ioc_reset(ioc,
                    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15)))
                        return 0;
6949
6950  issue_diag_reset:
6951         rc = _base_diag_reset(ioc);
6952         return rc;
6953 }
6954
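/*
 * Disposition by doorbell state in the function above: RESET is given
 * up to 10 seconds to become READY; READY returns immediately; an
 * unexpectedly active doorbell, FAULT, or COREDUMP (after letting the
 * firmware finish writing the coredump region) falls through to a diag
 * reset; OPERATIONAL first tries a message-unit (soft) reset unless
 * FORCE_BIG_HAMMER was requested.
 */
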
6955 /**
6956  * _base_make_ioc_operational - put controller in OPERATIONAL state
6957  * @ioc: per adapter object
6958  *
6959  * Return: 0 for success, non-zero for failure.
6960  */
6961 static int
6962 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6963 {
6964         int r, i, index, rc;
6965         unsigned long   flags;
6966         u32 reply_address;
6967         u16 smid;
6968         struct _tr_list *delayed_tr, *delayed_tr_next;
6969         struct _sc_list *delayed_sc, *delayed_sc_next;
6970         struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
6971         u8 hide_flag;
6972         struct adapter_reply_queue *reply_q;
6973         Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
6974
6975         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6976
6977         /* clean the delayed target reset list */
6978         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6979             &ioc->delayed_tr_list, list) {
6980                 list_del(&delayed_tr->list);
6981                 kfree(delayed_tr);
6982         }
6983
6984
6985         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6986             &ioc->delayed_tr_volume_list, list) {
6987                 list_del(&delayed_tr->list);
6988                 kfree(delayed_tr);
6989         }
6990
6991         list_for_each_entry_safe(delayed_sc, delayed_sc_next,
6992             &ioc->delayed_sc_list, list) {
6993                 list_del(&delayed_sc->list);
6994                 kfree(delayed_sc);
6995         }
6996
6997         list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
6998             &ioc->delayed_event_ack_list, list) {
6999                 list_del(&delayed_event_ack->list);
7000                 kfree(delayed_event_ack);
7001         }
7002
7003         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7004
7005         /* hi-priority queue */
7006         INIT_LIST_HEAD(&ioc->hpr_free_list);
7007         smid = ioc->hi_priority_smid;
7008         for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
7009                 ioc->hpr_lookup[i].cb_idx = 0xFF;
7010                 ioc->hpr_lookup[i].smid = smid;
7011                 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
7012                     &ioc->hpr_free_list);
7013         }
7014
7015         /* internal queue */
7016         INIT_LIST_HEAD(&ioc->internal_free_list);
7017         smid = ioc->internal_smid;
7018         for (i = 0; i < ioc->internal_depth; i++, smid++) {
7019                 ioc->internal_lookup[i].cb_idx = 0xFF;
7020                 ioc->internal_lookup[i].smid = smid;
7021                 list_add_tail(&ioc->internal_lookup[i].tracker_list,
7022                     &ioc->internal_free_list);
7023         }
7024
7025         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7026
7027         /* initialize Reply Free Queue */
7028         for (i = 0, reply_address = (u32)ioc->reply_dma ;
7029             i < ioc->reply_free_queue_depth ; i++, reply_address +=
7030             ioc->reply_sz) {
7031                 ioc->reply_free[i] = cpu_to_le32(reply_address);
7032                 if (ioc->is_mcpu_endpoint)
7033                         _base_clone_reply_to_sys_mem(ioc,
7034                                         reply_address, i);
7035         }
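
        /*
         * Illustrative note: after the loop above, entry i of the Reply
         * Free Queue holds the DMA address of reply frame i, i.e.
         *
         *      reply_free[i] = cpu_to_le32(reply_dma + i * reply_sz);
         *
         * giving the firmware a pool of reply frames to post into.
         */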
7036
7037         /* initialize reply queues */
7038         if (ioc->is_driver_loading)
7039                 _base_assign_reply_queues(ioc);
7040
7041         /* initialize Reply Post Free Queue */
7042         index = 0;
7043         reply_post_free_contig = ioc->reply_post[0].reply_post_free;
7044         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
7045                 /*
7046                  * If RDPQ is enabled, switch to the next allocation.
7047                  * Otherwise advance within the contiguous region.
7048                  */
7049                 if (ioc->rdpq_array_enable) {
7050                         reply_q->reply_post_free =
7051                                 ioc->reply_post[index++].reply_post_free;
7052                 } else {
7053                         reply_q->reply_post_free = reply_post_free_contig;
7054                         reply_post_free_contig += ioc->reply_post_queue_depth;
7055                 }
7056
7057                 reply_q->reply_post_host_index = 0;
7058                 for (i = 0; i < ioc->reply_post_queue_depth; i++)
7059                         reply_q->reply_post_free[i].Words =
7060                             cpu_to_le64(ULLONG_MAX);
7061                 if (!_base_is_controller_msix_enabled(ioc))
7062                         goto skip_init_reply_post_free_queue;
7063         }
7064  skip_init_reply_post_free_queue:
7065
7066         r = _base_send_ioc_init(ioc);
7067         if (r) {
                /*
                 * There is no need to check the IOC state for fault and
                 * issue a diag reset during host reset; this check is
                 * needed only at driver load time.
                 */
7073                 if (!ioc->is_driver_loading)
7074                         return r;
7075
7076                 rc = _base_check_for_fault_and_issue_reset(ioc);
7077                 if (rc || (_base_send_ioc_init(ioc)))
7078                         return r;
7079         }
7080
7081         /* initialize reply free host index */
7082         ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
7083         writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
7084
7085         /* initialize reply post host index */
7086         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
7087                 if (ioc->combined_reply_queue)
7088                         writel((reply_q->msix_index & 7)<<
7089                            MPI2_RPHI_MSIX_INDEX_SHIFT,
7090                            ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
7091                 else
7092                         writel(reply_q->msix_index <<
7093                                 MPI2_RPHI_MSIX_INDEX_SHIFT,
7094                                 &ioc->chip->ReplyPostHostIndex);
7095
7096                 if (!_base_is_controller_msix_enabled(ioc))
7097                         goto skip_init_reply_post_host_index;
7098         }
7099
7100  skip_init_reply_post_host_index:
7101
7102         _base_unmask_interrupts(ioc);
7103
7104         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7105                 r = _base_display_fwpkg_version(ioc);
7106                 if (r)
7107                         return r;
7108         }
7109
7110         _base_static_config_pages(ioc);
7111         r = _base_event_notification(ioc);
7112         if (r)
7113                 return r;
7114
7115         if (ioc->is_driver_loading) {
7116
7117                 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
7118                     == 0x80) {
7119                         hide_flag = (u8) (
7120                             le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
7121                             MFG_PAGE10_HIDE_SSDS_MASK);
7122                         if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
7123                                 ioc->mfg_pg10_hide_flag = hide_flag;
7124                 }
7125
7126                 ioc->wait_for_discovery_to_complete =
7127                     _base_determine_wait_on_discovery(ioc);
7128
7129                 return r; /* scan_start and scan_finished support */
7130         }
7131
        return _base_send_port_enable(ioc);
7137 }
7138
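/*
 * Bring-up order implemented above: purge the delayed tr/sc/event-ack
 * lists, rebuild the hi-priority and internal smid free lists, seed the
 * Reply Free and Reply Post Free queues, send IOCInit, program the
 * reply host indexes, unmask interrupts, read the static config pages,
 * send the event notification and, unless the driver is still loading,
 * issue port enable.
 */
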
7139 /**
 * mpt3sas_base_free_resources - free controller resources
7141  * @ioc: per adapter object
7142  */
7143 void
7144 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
7145 {
7146         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7147
        /* synchronize freeing resources with the pci_access_mutex lock */
7149         mutex_lock(&ioc->pci_access_mutex);
7150         if (ioc->chip_phys && ioc->chip) {
7151                 _base_mask_interrupts(ioc);
7152                 ioc->shost_recovery = 1;
7153                 _base_make_ioc_ready(ioc, SOFT_RESET);
7154                 ioc->shost_recovery = 0;
7155         }
7156
7157         mpt3sas_base_unmap_resources(ioc);
7158         mutex_unlock(&ioc->pci_access_mutex);
7160 }
7161
7162 /**
7163  * mpt3sas_base_attach - attach controller instance
7164  * @ioc: per adapter object
7165  *
7166  * Return: 0 for success, non-zero for failure.
7167  */
7168 int
7169 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
7170 {
7171         int r, i, rc;
7172         int cpu_id, last_cpu_id = 0;
7173
7174         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7175
7176         /* setup cpu_msix_table */
7177         ioc->cpu_count = num_online_cpus();
7178         for_each_online_cpu(cpu_id)
7179                 last_cpu_id = cpu_id;
7180         ioc->cpu_msix_table_sz = last_cpu_id + 1;
7181         ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
7182         ioc->reply_queue_count = 1;
7183         if (!ioc->cpu_msix_table) {
7184                 ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
7185                 r = -ENOMEM;
7186                 goto out_free_resources;
7187         }
7188
7189         if (ioc->is_warpdrive) {
7190                 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
7191                     sizeof(resource_size_t *), GFP_KERNEL);
7192                 if (!ioc->reply_post_host_index) {
7193                         ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
7194                         r = -ENOMEM;
7195                         goto out_free_resources;
7196                 }
7197         }
7198
7199         ioc->smp_affinity_enable = smp_affinity_enable;
7200
7201         ioc->rdpq_array_enable_assigned = 0;
7202         ioc->use_32bit_dma = false;
7203         if (ioc->is_aero_ioc)
7204                 ioc->base_readl = &_base_readl_aero;
7205         else
7206                 ioc->base_readl = &_base_readl;
7207         r = mpt3sas_base_map_resources(ioc);
7208         if (r)
7209                 goto out_free_resources;
7210
7211         pci_set_drvdata(ioc->pdev, ioc->shost);
7212         r = _base_get_ioc_facts(ioc);
7213         if (r) {
7214                 rc = _base_check_for_fault_and_issue_reset(ioc);
7215                 if (rc || (_base_get_ioc_facts(ioc)))
7216                         goto out_free_resources;
7217         }
7218
7219         switch (ioc->hba_mpi_version_belonged) {
7220         case MPI2_VERSION:
7221                 ioc->build_sg_scmd = &_base_build_sg_scmd;
7222                 ioc->build_sg = &_base_build_sg;
7223                 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
7224                 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7225                 break;
7226         case MPI25_VERSION:
7227         case MPI26_VERSION:
                /*
                 * In SAS 3.0, SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU,
                 * Target Assist, and Target Status all require IEEE
                 * formatted scatter gather elements.
                 */
7234                 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
7235                 ioc->build_sg = &_base_build_sg_ieee;
7236                 ioc->build_nvme_prp = &_base_build_nvme_prp;
7237                 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
7238                 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
7239                 if (ioc->high_iops_queues)
7240                         ioc->get_msix_index_for_smlio =
7241                                         &_base_get_high_iops_msix_index;
7242                 else
7243                         ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7244                 break;
7245         }
7246         if (ioc->atomic_desc_capable) {
7247                 ioc->put_smid_default = &_base_put_smid_default_atomic;
7248                 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
7249                 ioc->put_smid_fast_path =
7250                                 &_base_put_smid_fast_path_atomic;
7251                 ioc->put_smid_hi_priority =
7252                                 &_base_put_smid_hi_priority_atomic;
7253         } else {
7254                 ioc->put_smid_default = &_base_put_smid_default;
7255                 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
7256                 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
7257                 if (ioc->is_mcpu_endpoint)
7258                         ioc->put_smid_scsi_io =
7259                                 &_base_put_smid_mpi_ep_scsi_io;
7260                 else
7261                         ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
7262         }
        /*
         * These function pointers are for the other requests that don't
         * require IEEE scatter gather elements.
         *
         * For example, Configuration Pages and SAS IOUNIT Control don't.
         */
7269         ioc->build_sg_mpi = &_base_build_sg;
7270         ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
7271
7272         r = _base_make_ioc_ready(ioc, SOFT_RESET);
7273         if (r)
7274                 goto out_free_resources;
7275
7276         ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
7277             sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
7278         if (!ioc->pfacts) {
7279                 r = -ENOMEM;
7280                 goto out_free_resources;
7281         }
7282
7283         for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
7284                 r = _base_get_port_facts(ioc, i);
7285                 if (r) {
7286                         rc = _base_check_for_fault_and_issue_reset(ioc);
7287                         if (rc || (_base_get_port_facts(ioc, i)))
7288                                 goto out_free_resources;
7289                 }
7290         }
7291
7292         r = _base_allocate_memory_pools(ioc);
7293         if (r)
7294                 goto out_free_resources;
7295
7296         if (irqpoll_weight > 0)
7297                 ioc->thresh_hold = irqpoll_weight;
7298         else
7299                 ioc->thresh_hold = ioc->hba_queue_depth/4;
7300
7301         _base_init_irqpolls(ioc);
7302         init_waitqueue_head(&ioc->reset_wq);
7303
7304         /* allocate memory pd handle bitmask list */
7305         ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
7306         if (ioc->facts.MaxDevHandle % 8)
7307                 ioc->pd_handles_sz++;
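        /*
         * Illustrative note: the two statements above round the bitmask
         * size up to whole bytes (one bit per device handle), equivalent
         * to DIV_ROUND_UP(ioc->facts.MaxDevHandle, 8); e.g. MaxDevHandle
         * of 1022 gives 1022 / 8 = 127, plus one byte for the remainder.
         */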
7308         ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
7309             GFP_KERNEL);
7310         if (!ioc->pd_handles) {
7311                 r = -ENOMEM;
7312                 goto out_free_resources;
7313         }
7314         ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
7315             GFP_KERNEL);
7316         if (!ioc->blocking_handles) {
7317                 r = -ENOMEM;
7318                 goto out_free_resources;
7319         }
7320
7321         /* allocate memory for pending OS device add list */
7322         ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
7323         if (ioc->facts.MaxDevHandle % 8)
7324                 ioc->pend_os_device_add_sz++;
7325         ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
7326             GFP_KERNEL);
        if (!ioc->pend_os_device_add) {
                r = -ENOMEM;
                goto out_free_resources;
        }
7329
7330         ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
7331         ioc->device_remove_in_progress =
7332                 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
        if (!ioc->device_remove_in_progress) {
                r = -ENOMEM;
                goto out_free_resources;
        }
7335
7336         ioc->fwfault_debug = mpt3sas_fwfault_debug;
7337
7338         /* base internal command bits */
7339         mutex_init(&ioc->base_cmds.mutex);
7340         ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7341         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7342
7343         /* port_enable command bits */
7344         ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7345         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7346
7347         /* transport internal command bits */
7348         ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7349         ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
7350         mutex_init(&ioc->transport_cmds.mutex);
7351
7352         /* scsih internal command bits */
7353         ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7354         ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7355         mutex_init(&ioc->scsih_cmds.mutex);
7356
7357         /* task management internal command bits */
7358         ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7359         ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
7360         mutex_init(&ioc->tm_cmds.mutex);
7361
7362         /* config page internal command bits */
7363         ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7364         ioc->config_cmds.status = MPT3_CMD_NOT_USED;
7365         mutex_init(&ioc->config_cmds.mutex);
7366
7367         /* ctl module internal command bits */
7368         ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7369         ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
7370         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
7371         mutex_init(&ioc->ctl_cmds.mutex);
7372
7373         if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
7374             !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
7375             !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
7376             !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
7377                 r = -ENOMEM;
7378                 goto out_free_resources;
7379         }
7380
7381         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
7382                 ioc->event_masks[i] = -1;
7383
7384         /* here we enable the events we care about */
7385         _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
7386         _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
7387         _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
7388         _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
7389         _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
7390         _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
7391         _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
7392         _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
7393         _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
7394         _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
7395         _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
7396         _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
7397         _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
7398         if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
7399                 if (ioc->is_gen35_ioc) {
7400                         _base_unmask_events(ioc,
7401                                 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
7402                         _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
7403                         _base_unmask_events(ioc,
7404                                 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
7405                 }
7406         }
7407         r = _base_make_ioc_operational(ioc);
7408         if (r)
7409                 goto out_free_resources;
7410
        /*
         * Keep a copy of the current IOCFacts in prev_fw_facts; it is
         * used during online firmware upgrade.
         */
7415         memcpy(&ioc->prev_fw_facts, &ioc->facts,
7416             sizeof(struct mpt3sas_facts));
7417
7418         ioc->non_operational_loop = 0;
7419         ioc->ioc_coredump_loop = 0;
7420         ioc->got_task_abort_from_ioctl = 0;
7421         return 0;
7422
7423  out_free_resources:
7424
7425         ioc->remove_host = 1;
7426
7427         mpt3sas_base_free_resources(ioc);
7428         _base_release_memory_pools(ioc);
7429         pci_set_drvdata(ioc->pdev, NULL);
7430         kfree(ioc->cpu_msix_table);
7431         if (ioc->is_warpdrive)
7432                 kfree(ioc->reply_post_host_index);
7433         kfree(ioc->pd_handles);
7434         kfree(ioc->blocking_handles);
7435         kfree(ioc->device_remove_in_progress);
7436         kfree(ioc->pend_os_device_add);
7437         kfree(ioc->tm_cmds.reply);
7438         kfree(ioc->transport_cmds.reply);
7439         kfree(ioc->scsih_cmds.reply);
7440         kfree(ioc->config_cmds.reply);
7441         kfree(ioc->base_cmds.reply);
7442         kfree(ioc->port_enable_cmds.reply);
7443         kfree(ioc->ctl_cmds.reply);
7444         kfree(ioc->ctl_cmds.sense);
7445         kfree(ioc->pfacts);
7446         ioc->ctl_cmds.reply = NULL;
7447         ioc->base_cmds.reply = NULL;
7448         ioc->tm_cmds.reply = NULL;
7449         ioc->scsih_cmds.reply = NULL;
7450         ioc->transport_cmds.reply = NULL;
7451         ioc->config_cmds.reply = NULL;
7452         ioc->pfacts = NULL;
7453         return r;
7454 }
7455
7456
7457 /**
7458  * mpt3sas_base_detach - remove controller instance
7459  * @ioc: per adapter object
7460  */
7461 void
7462 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
7463 {
7464         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7465
7466         mpt3sas_base_stop_watchdog(ioc);
7467         mpt3sas_base_free_resources(ioc);
7468         _base_release_memory_pools(ioc);
7469         mpt3sas_free_enclosure_list(ioc);
7470         pci_set_drvdata(ioc->pdev, NULL);
7471         kfree(ioc->cpu_msix_table);
7472         if (ioc->is_warpdrive)
7473                 kfree(ioc->reply_post_host_index);
7474         kfree(ioc->pd_handles);
7475         kfree(ioc->blocking_handles);
7476         kfree(ioc->device_remove_in_progress);
7477         kfree(ioc->pend_os_device_add);
7478         kfree(ioc->pfacts);
7479         kfree(ioc->ctl_cmds.reply);
7480         kfree(ioc->ctl_cmds.sense);
7481         kfree(ioc->base_cmds.reply);
7482         kfree(ioc->port_enable_cmds.reply);
7483         kfree(ioc->tm_cmds.reply);
7484         kfree(ioc->transport_cmds.reply);
7485         kfree(ioc->scsih_cmds.reply);
7486         kfree(ioc->config_cmds.reply);
7487 }
7488
7489 /**
7490  * _base_pre_reset_handler - pre reset handler
7491  * @ioc: per adapter object
7492  */
7493 static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
7494 {
7495         mpt3sas_scsih_pre_reset_handler(ioc);
7496         mpt3sas_ctl_pre_reset_handler(ioc);
7497         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
7498 }
7499
7500 /**
7501  * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
7502  * @ioc: per adapter object
7503  */
7504 static void
7505 _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
7506 {
7507         dtmprintk(ioc,
7508             ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
7509         if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
7510                 ioc->transport_cmds.status |= MPT3_CMD_RESET;
7511                 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
7512                 complete(&ioc->transport_cmds.done);
7513         }
7514         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7515                 ioc->base_cmds.status |= MPT3_CMD_RESET;
7516                 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
7517                 complete(&ioc->base_cmds.done);
7518         }
7519         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7520                 ioc->port_enable_failed = 1;
7521                 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
7522                 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
7523                 if (ioc->is_driver_loading) {
7524                         ioc->start_scan_failed =
7525                                 MPI2_IOCSTATUS_INTERNAL_ERROR;
7526                         ioc->start_scan = 0;
7527                         ioc->port_enable_cmds.status =
7528                                 MPT3_CMD_NOT_USED;
7529                 } else {
7530                         complete(&ioc->port_enable_cmds.done);
7531                 }
7532         }
7533         if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
7534                 ioc->config_cmds.status |= MPT3_CMD_RESET;
7535                 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
7536                 ioc->config_cmds.smid = USHRT_MAX;
7537                 complete(&ioc->config_cmds.done);
7538         }
7539 }
7540
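/*
 * Setting MPT3_CMD_RESET before complete() lets the sleeping issuer
 * distinguish a host-reset flush from a normal completion; see
 * _base_event_notification() above, which returns -EFAULT when it finds
 * MPT3_CMD_RESET set after its wait.
 */
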
7541 /**
7542  * _base_clear_outstanding_commands - clear all outstanding commands
7543  * @ioc: per adapter object
7544  */
7545 static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
7546 {
7547         mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
7548         mpt3sas_ctl_clear_outstanding_ioctls(ioc);
7549         _base_clear_outstanding_mpt_commands(ioc);
7550 }
7551
7552 /**
7553  * _base_reset_done_handler - reset done handler
7554  * @ioc: per adapter object
7555  */
7556 static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
7557 {
7558         mpt3sas_scsih_reset_done_handler(ioc);
7559         mpt3sas_ctl_reset_done_handler(ioc);
7560         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
7561 }
7562
7563 /**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits up to 10 seconds for all pending commands to
 * complete prior to putting the controller in reset.
7569  */
7570 void
7571 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
7572 {
7573         u32 ioc_state;
7574
7575         ioc->pending_io_count = 0;
7576
7577         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7578         if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
7579                 return;
7580
7581         /* pending command count */
7582         ioc->pending_io_count = scsi_host_busy(ioc->shost);
7583
7584         if (!ioc->pending_io_count)
7585                 return;
7586
7587         /* wait for pending commands to complete */
7588         wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
7589 }
7590
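/*
 * Note (assumption, not verified here): the I/O completion path is
 * expected to decrement ioc->pending_io_count and wake ioc->reset_wq
 * while shost_recovery is set, which is what lets the
 * wait_event_timeout() above return before the 10 second limit once
 * the host goes idle.
 */
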
7591 /**
 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
 *     attributes during online firmware upgrade and update the
 *     corresponding IOC variables accordingly.
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
7598 static int
7599 _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
7600 {
7601         u16 pd_handles_sz;
7602         void *pd_handles = NULL, *blocking_handles = NULL;
7603         void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
7604         struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
7605
7606         if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
7607                 pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
7608                 if (ioc->facts.MaxDevHandle % 8)
7609                         pd_handles_sz++;
7610
7611                 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
7612                     GFP_KERNEL);
7613                 if (!pd_handles) {
7614                         ioc_info(ioc,
7615                             "Unable to allocate the memory for pd_handles of sz: %d\n",
7616                             pd_handles_sz);
7617                         return -ENOMEM;
7618                 }
7619                 memset(pd_handles + ioc->pd_handles_sz, 0,
7620                     (pd_handles_sz - ioc->pd_handles_sz));
7621                 ioc->pd_handles = pd_handles;
7622
7623                 blocking_handles = krealloc(ioc->blocking_handles,
7624                     pd_handles_sz, GFP_KERNEL);
7625                 if (!blocking_handles) {
                        ioc_info(ioc,
                            "Unable to allocate the memory for blocking_handles of sz: %d\n",
                            pd_handles_sz);
7630                         return -ENOMEM;
7631                 }
7632                 memset(blocking_handles + ioc->pd_handles_sz, 0,
7633                     (pd_handles_sz - ioc->pd_handles_sz));
7634                 ioc->blocking_handles = blocking_handles;
7635                 ioc->pd_handles_sz = pd_handles_sz;
7636
7637                 pend_os_device_add = krealloc(ioc->pend_os_device_add,
7638                     pd_handles_sz, GFP_KERNEL);
7639                 if (!pend_os_device_add) {
7640                         ioc_info(ioc,
7641                             "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
7642                             pd_handles_sz);
7643                         return -ENOMEM;
7644                 }
7645                 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
7646                     (pd_handles_sz - ioc->pend_os_device_add_sz));
7647                 ioc->pend_os_device_add = pend_os_device_add;
7648                 ioc->pend_os_device_add_sz = pd_handles_sz;
7649
7650                 device_remove_in_progress = krealloc(
7651                     ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
7652                 if (!device_remove_in_progress) {
                        ioc_info(ioc,
                            "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
                            pd_handles_sz);
7657                         return -ENOMEM;
7658                 }
7659                 memset(device_remove_in_progress +
7660                     ioc->device_remove_in_progress_sz, 0,
7661                     (pd_handles_sz - ioc->device_remove_in_progress_sz));
7662                 ioc->device_remove_in_progress = device_remove_in_progress;
7663                 ioc->device_remove_in_progress_sz = pd_handles_sz;
7664         }
7665
7666         memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
7667         return 0;
7668 }
7669
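/*
 * A minimal sketch of the grow-only pattern used above for each bitmask
 * (krealloc() preserves the old contents, so only the new tail needs
 * zeroing):
 *
 *      new = krealloc(old, new_sz, GFP_KERNEL);
 *      if (!new)
 *              return -ENOMEM;
 *      memset(new + old_sz, 0, new_sz - old_sz);
 */
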
7670 /**
7671  * mpt3sas_base_hard_reset_handler - reset controller
7672  * @ioc: Pointer to MPT_ADAPTER structure
7673  * @type: FORCE_BIG_HAMMER or SOFT_RESET
7674  *
7675  * Return: 0 for success, non-zero for failure.
7676  */
7677 int
7678 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
7679         enum reset_type type)
7680 {
7681         int r;
7682         unsigned long flags;
7683         u32 ioc_state;
7684         u8 is_fault = 0, is_trigger = 0;
7685
7686         dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
7687
7688         if (ioc->pci_error_recovery) {
7689                 ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
7690                 r = 0;
7691                 goto out_unlocked;
7692         }
7693
7694         if (mpt3sas_fwfault_debug)
7695                 mpt3sas_halt_firmware(ioc);
7696
7697         /* wait for an active reset in progress to complete */
7698         mutex_lock(&ioc->reset_in_progress_mutex);
7699
7700         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
7701         ioc->shost_recovery = 1;
7702         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
7703
7704         if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
7705             MPT3_DIAG_BUFFER_IS_REGISTERED) &&
7706             (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
7707             MPT3_DIAG_BUFFER_IS_RELEASED))) {
7708                 is_trigger = 1;
7709                 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7710                 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
7711                     (ioc_state & MPI2_IOC_STATE_MASK) ==
7712                     MPI2_IOC_STATE_COREDUMP)
7713                         is_fault = 1;
7714         }
7715         _base_pre_reset_handler(ioc);
7716         mpt3sas_wait_for_commands_to_complete(ioc);
7717         _base_mask_interrupts(ioc);
7718         r = _base_make_ioc_ready(ioc, type);
7719         if (r)
7720                 goto out;
7721         _base_clear_outstanding_commands(ioc);
7722
7723         /* If this hard reset is called while port enable is active, then
7724          * there is no reason to call make_ioc_operational
7725          */
7726         if (ioc->is_driver_loading && ioc->port_enable_failed) {
7727                 ioc->remove_host = 1;
7728                 r = -EFAULT;
7729                 goto out;
7730         }
7731         r = _base_get_ioc_facts(ioc);
7732         if (r)
7733                 goto out;
7734
7735         r = _base_check_ioc_facts_changes(ioc);
7736         if (r) {
                ioc_info(ioc,
                    "Some parameters have changed in the new firmware image; a system reboot is required\n");
7740                 goto out;
7741         }
7742         if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
                panic("%s: Issue occurred with flashing controller firmware. Please reboot the system and ensure that the correct firmware version is running\n",
                      ioc->name);
7746
7747         r = _base_make_ioc_operational(ioc);
7748         if (!r)
7749                 _base_reset_done_handler(ioc);
7750
7751  out:
7752         ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
7753
7754         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
7755         ioc->shost_recovery = 0;
7756         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
7757         ioc->ioc_reset_count++;
7758         mutex_unlock(&ioc->reset_in_progress_mutex);
7759
7760  out_unlocked:
7761         if ((r == 0) && is_trigger) {
7762                 if (is_fault)
7763                         mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
7764                 else
7765                         mpt3sas_trigger_master(ioc,
7766                             MASTER_TRIGGER_ADAPTER_RESET);
7767         }
7768         dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
7769         return r;
7770 }